Diffstat (limited to 'lib/ansible')
-rw-r--r-- lib/ansible/__init__.py | 31
-rw-r--r-- lib/ansible/__main__.py | 41
-rw-r--r-- lib/ansible/_vendor/__init__.py | 46
-rw-r--r-- lib/ansible/cli/__init__.py | 689
-rwxr-xr-x lib/ansible/cli/adhoc.py | 207
-rw-r--r-- lib/ansible/cli/arguments/__init__.py | 5
-rw-r--r-- lib/ansible/cli/arguments/option_helpers.py | 391
-rwxr-xr-x lib/ansible/cli/config.py | 551
-rwxr-xr-x lib/ansible/cli/console.py | 604
-rwxr-xr-x lib/ansible/cli/doc.py | 1393
-rwxr-xr-x lib/ansible/cli/galaxy.py | 1865
-rwxr-xr-x lib/ansible/cli/inventory.py | 417
-rwxr-xr-x lib/ansible/cli/playbook.py | 231
-rwxr-xr-x lib/ansible/cli/pull.py | 364
-rw-r--r-- lib/ansible/cli/scripts/__init__.py | 0
-rwxr-xr-x lib/ansible/cli/scripts/ansible_connection_cli_stub.py | 354
-rwxr-xr-x lib/ansible/cli/vault.py | 480
-rw-r--r-- lib/ansible/collections/__init__.py | 29
-rw-r--r-- lib/ansible/collections/list.py | 114
-rw-r--r-- lib/ansible/compat/__init__.py | 26
-rw-r--r-- lib/ansible/compat/selectors/__init__.py | 32
-rw-r--r-- lib/ansible/config/__init__.py | 0
-rw-r--r-- lib/ansible/config/ansible_builtin_runtime.yml | 9742
-rw-r--r-- lib/ansible/config/base.yml | 2067
-rw-r--r-- lib/ansible/config/manager.py | 607
-rw-r--r-- lib/ansible/constants.py | 191
-rw-r--r-- lib/ansible/context.py | 57
-rw-r--r-- lib/ansible/errors/__init__.py | 373
-rw-r--r-- lib/ansible/errors/yaml_strings.py | 140
-rw-r--r-- lib/ansible/executor/__init__.py | 20
-rw-r--r-- lib/ansible/executor/action_write_locks.py | 46
-rw-r--r-- lib/ansible/executor/discovery/__init__.py | 0
-rw-r--r-- lib/ansible/executor/discovery/python_target.py | 48
-rw-r--r-- lib/ansible/executor/interpreter_discovery.py | 207
-rw-r--r-- lib/ansible/executor/module_common.py | 1428
-rw-r--r-- lib/ansible/executor/play_iterator.py | 652
-rw-r--r-- lib/ansible/executor/playbook_executor.py | 335
-rw-r--r-- lib/ansible/executor/powershell/__init__.py | 0
-rw-r--r-- lib/ansible/executor/powershell/async_watchdog.ps1 | 117
-rw-r--r-- lib/ansible/executor/powershell/async_wrapper.ps1 | 174
-rw-r--r-- lib/ansible/executor/powershell/become_wrapper.ps1 | 163
-rw-r--r-- lib/ansible/executor/powershell/bootstrap_wrapper.ps1 | 13
-rw-r--r-- lib/ansible/executor/powershell/coverage_wrapper.ps1 | 199
-rw-r--r-- lib/ansible/executor/powershell/exec_wrapper.ps1 | 237
-rw-r--r-- lib/ansible/executor/powershell/module_manifest.py | 402
-rw-r--r-- lib/ansible/executor/powershell/module_powershell_wrapper.ps1 | 75
-rw-r--r-- lib/ansible/executor/powershell/module_script_wrapper.ps1 | 22
-rw-r--r-- lib/ansible/executor/powershell/module_wrapper.ps1 | 226
-rw-r--r-- lib/ansible/executor/process/__init__.py | 20
-rw-r--r-- lib/ansible/executor/process/worker.py | 226
-rw-r--r-- lib/ansible/executor/stats.py | 100
-rw-r--r-- lib/ansible/executor/task_executor.py | 1239
-rw-r--r-- lib/ansible/executor/task_queue_manager.py | 456
-rw-r--r-- lib/ansible/executor/task_result.py | 154
-rw-r--r-- lib/ansible/galaxy/__init__.py | 72
-rw-r--r-- lib/ansible/galaxy/api.py | 913
-rw-r--r-- lib/ansible/galaxy/collection/__init__.py | 1836
-rw-r--r-- lib/ansible/galaxy/collection/concrete_artifact_manager.py | 755
-rw-r--r-- lib/ansible/galaxy/collection/galaxy_api_proxy.py | 216
-rw-r--r-- lib/ansible/galaxy/collection/gpg.py | 282
-rw-r--r-- lib/ansible/galaxy/data/apb/Dockerfile.j2 | 9
-rw-r--r-- lib/ansible/galaxy/data/apb/Makefile.j2 | 21
-rw-r--r-- lib/ansible/galaxy/data/apb/README.md | 38
-rw-r--r-- lib/ansible/galaxy/data/apb/apb.yml.j2 | 13
-rw-r--r-- lib/ansible/galaxy/data/apb/defaults/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/apb/files/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/apb/handlers/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/apb/meta/main.yml.j2 | 44
-rw-r--r-- lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 | 8
-rw-r--r-- lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 | 8
-rw-r--r-- lib/ansible/galaxy/data/apb/tasks/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/apb/templates/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/apb/tests/ansible.cfg | 2
-rw-r--r-- lib/ansible/galaxy/data/apb/tests/inventory | 3
-rw-r--r-- lib/ansible/galaxy/data/apb/tests/test.yml.j2 | 7
-rw-r--r-- lib/ansible/galaxy/data/apb/vars/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/collections_galaxy_meta.yml | 120
-rw-r--r-- lib/ansible/galaxy/data/container/README.md | 49
-rw-r--r-- lib/ansible/galaxy/data/container/defaults/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/container/files/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/container/handlers/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/container/meta/container.yml.j2 | 11
-rw-r--r-- lib/ansible/galaxy/data/container/meta/main.yml.j2 | 52
-rw-r--r-- lib/ansible/galaxy/data/container/tasks/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/container/templates/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/container/tests/ansible.cfg | 2
-rw-r--r-- lib/ansible/galaxy/data/container/tests/inventory | 3
-rw-r--r-- lib/ansible/galaxy/data/container/tests/test.yml.j2 | 7
-rw-r--r-- lib/ansible/galaxy/data/container/vars/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/default/collection/README.md.j2 | 3
-rw-r--r-- lib/ansible/galaxy/data/default/collection/docs/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 | 16
-rw-r--r-- lib/ansible/galaxy/data/default/collection/meta/runtime.yml | 52
-rw-r--r-- lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 | 31
-rw-r--r-- lib/ansible/galaxy/data/default/collection/roles/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/default/role/README.md | 38
-rw-r--r-- lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/default/role/files/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/default/role/meta/main.yml.j2 | 55
-rw-r--r-- lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/default/role/templates/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/default/role/tests/inventory | 2
-rw-r--r-- lib/ansible/galaxy/data/default/role/tests/test.yml.j2 | 5
-rw-r--r-- lib/ansible/galaxy/data/default/role/vars/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/network/README.md | 38
-rw-r--r-- lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 | 40
-rw-r--r-- lib/ansible/galaxy/data/network/defaults/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/network/files/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/network/library/example_command.py.j2 | 66
-rw-r--r-- lib/ansible/galaxy/data/network/library/example_config.py.j2 | 66
-rw-r--r-- lib/ansible/galaxy/data/network/library/example_facts.py.j2 | 66
-rw-r--r-- lib/ansible/galaxy/data/network/meta/main.yml.j2 | 52
-rw-r--r-- lib/ansible/galaxy/data/network/module_utils/example.py.j2 | 40
-rw-r--r-- lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 | 40
-rw-r--r-- lib/ansible/galaxy/data/network/tasks/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/network/templates/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 | 40
-rw-r--r-- lib/ansible/galaxy/data/network/tests/inventory | 2
-rw-r--r-- lib/ansible/galaxy/data/network/tests/test.yml.j2 | 14
-rw-r--r-- lib/ansible/galaxy/data/network/vars/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/dependency_resolution/__init__.py | 55
-rw-r--r-- lib/ansible/galaxy/dependency_resolution/dataclasses.py | 573
-rw-r--r-- lib/ansible/galaxy/dependency_resolution/errors.py | 19
-rw-r--r-- lib/ansible/galaxy/dependency_resolution/providers.py | 548
-rw-r--r-- lib/ansible/galaxy/dependency_resolution/reporters.py | 21
-rw-r--r-- lib/ansible/galaxy/dependency_resolution/resolvers.py | 21
-rw-r--r-- lib/ansible/galaxy/dependency_resolution/versioning.py | 70
-rw-r--r-- lib/ansible/galaxy/role.py | 439
-rw-r--r-- lib/ansible/galaxy/token.py | 187
-rw-r--r-- lib/ansible/galaxy/user_agent.py | 23
-rw-r--r-- lib/ansible/inventory/__init__.py | 0
-rw-r--r-- lib/ansible/inventory/data.py | 283
-rw-r--r-- lib/ansible/inventory/group.py | 288
-rw-r--r-- lib/ansible/inventory/helpers.py | 40
-rw-r--r-- lib/ansible/inventory/host.py | 169
-rw-r--r-- lib/ansible/inventory/manager.py | 752
-rw-r--r-- lib/ansible/keyword_desc.yml | 70
-rw-r--r-- lib/ansible/module_utils/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/_text.py | 15
-rw-r--r-- lib/ansible/module_utils/ansible_release.py | 24
-rw-r--r-- lib/ansible/module_utils/api.py | 166
-rw-r--r-- lib/ansible/module_utils/basic.py | 2148
-rw-r--r-- lib/ansible/module_utils/common/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/common/_collections_compat.py | 46
-rw-r--r-- lib/ansible/module_utils/common/_json_compat.py | 16
-rw-r--r-- lib/ansible/module_utils/common/_utils.py | 40
-rw-r--r-- lib/ansible/module_utils/common/arg_spec.py | 311
-rw-r--r-- lib/ansible/module_utils/common/collections.py | 112
-rw-r--r-- lib/ansible/module_utils/common/dict_transformations.py | 154
-rw-r--r-- lib/ansible/module_utils/common/file.py | 205
-rw-r--r-- lib/ansible/module_utils/common/json.py | 86
-rw-r--r-- lib/ansible/module_utils/common/locale.py | 61
-rw-r--r-- lib/ansible/module_utils/common/network.py | 161
-rw-r--r-- lib/ansible/module_utils/common/parameters.py | 940
-rw-r--r-- lib/ansible/module_utils/common/process.py | 46
-rw-r--r-- lib/ansible/module_utils/common/respawn.py | 98
-rw-r--r-- lib/ansible/module_utils/common/sys_info.py | 157
-rw-r--r-- lib/ansible/module_utils/common/text/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/common/text/converters.py | 322
-rw-r--r-- lib/ansible/module_utils/common/text/formatters.py | 114
-rw-r--r-- lib/ansible/module_utils/common/validation.py | 578
-rw-r--r-- lib/ansible/module_utils/common/warnings.py | 40
-rw-r--r-- lib/ansible/module_utils/common/yaml.py | 48
-rw-r--r-- lib/ansible/module_utils/compat/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/compat/_selectors2.py | 655
-rw-r--r-- lib/ansible/module_utils/compat/importlib.py | 18
-rw-r--r-- lib/ansible/module_utils/compat/paramiko.py | 22
-rw-r--r-- lib/ansible/module_utils/compat/selectors.py | 57
-rw-r--r-- lib/ansible/module_utils/compat/selinux.py | 113
-rw-r--r-- lib/ansible/module_utils/compat/typing.py | 25
-rw-r--r-- lib/ansible/module_utils/compat/version.py | 343
-rw-r--r-- lib/ansible/module_utils/connection.py | 222
-rw-r--r-- lib/ansible/module_utils/csharp/Ansible.AccessToken.cs | 460
-rw-r--r-- lib/ansible/module_utils/csharp/Ansible.Basic.cs | 1489
-rw-r--r-- lib/ansible/module_utils/csharp/Ansible.Become.cs | 655
-rw-r--r-- lib/ansible/module_utils/csharp/Ansible.Privilege.cs | 443
-rw-r--r-- lib/ansible/module_utils/csharp/Ansible.Process.cs | 461
-rw-r--r-- lib/ansible/module_utils/csharp/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/distro/__init__.py | 56
-rw-r--r-- lib/ansible/module_utils/distro/_distro.py | 1416
-rw-r--r-- lib/ansible/module_utils/errors.py | 123
-rw-r--r-- lib/ansible/module_utils/facts/__init__.py | 34
-rw-r--r-- lib/ansible/module_utils/facts/ansible_collector.py | 158
-rw-r--r-- lib/ansible/module_utils/facts/collector.py | 402
-rw-r--r-- lib/ansible/module_utils/facts/compat.py | 87
-rw-r--r-- lib/ansible/module_utils/facts/default_collectors.py | 177
-rw-r--r-- lib/ansible/module_utils/facts/hardware/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/facts/hardware/aix.py | 266
-rw-r--r-- lib/ansible/module_utils/facts/hardware/base.py | 68
-rw-r--r-- lib/ansible/module_utils/facts/hardware/darwin.py | 159
-rw-r--r-- lib/ansible/module_utils/facts/hardware/dragonfly.py | 26
-rw-r--r-- lib/ansible/module_utils/facts/hardware/freebsd.py | 241
-rw-r--r-- lib/ansible/module_utils/facts/hardware/hpux.py | 165
-rw-r--r-- lib/ansible/module_utils/facts/hardware/hurd.py | 53
-rw-r--r-- lib/ansible/module_utils/facts/hardware/linux.py | 869
-rw-r--r-- lib/ansible/module_utils/facts/hardware/netbsd.py | 184
-rw-r--r-- lib/ansible/module_utils/facts/hardware/openbsd.py | 184
-rw-r--r-- lib/ansible/module_utils/facts/hardware/sunos.py | 286
-rw-r--r-- lib/ansible/module_utils/facts/namespace.py | 51
-rw-r--r-- lib/ansible/module_utils/facts/network/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/facts/network/aix.py | 145
-rw-r--r-- lib/ansible/module_utils/facts/network/base.py | 72
-rw-r--r-- lib/ansible/module_utils/facts/network/darwin.py | 49
-rw-r--r-- lib/ansible/module_utils/facts/network/dragonfly.py | 33
-rw-r--r-- lib/ansible/module_utils/facts/network/fc_wwn.py | 111
-rw-r--r-- lib/ansible/module_utils/facts/network/freebsd.py | 33
-rw-r--r-- lib/ansible/module_utils/facts/network/generic_bsd.py | 321
-rw-r--r-- lib/ansible/module_utils/facts/network/hpux.py | 82
-rw-r--r-- lib/ansible/module_utils/facts/network/hurd.py | 87
-rw-r--r-- lib/ansible/module_utils/facts/network/iscsi.py | 115
-rw-r--r-- lib/ansible/module_utils/facts/network/linux.py | 327
-rw-r--r-- lib/ansible/module_utils/facts/network/netbsd.py | 48
-rw-r--r-- lib/ansible/module_utils/facts/network/nvme.py | 57
-rw-r--r-- lib/ansible/module_utils/facts/network/openbsd.py | 42
-rw-r--r-- lib/ansible/module_utils/facts/network/sunos.py | 116
-rw-r--r-- lib/ansible/module_utils/facts/other/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/facts/other/facter.py | 87
-rw-r--r-- lib/ansible/module_utils/facts/other/ohai.py | 74
-rw-r--r-- lib/ansible/module_utils/facts/packages.py | 86
-rw-r--r-- lib/ansible/module_utils/facts/sysctl.py | 62
-rw-r--r-- lib/ansible/module_utils/facts/system/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/facts/system/apparmor.py | 41
-rw-r--r-- lib/ansible/module_utils/facts/system/caps.py | 62
-rw-r--r-- lib/ansible/module_utils/facts/system/chroot.py | 49
-rw-r--r-- lib/ansible/module_utils/facts/system/cmdline.py | 81
-rw-r--r-- lib/ansible/module_utils/facts/system/date_time.py | 70
-rw-r--r-- lib/ansible/module_utils/facts/system/distribution.py | 726
-rw-r--r-- lib/ansible/module_utils/facts/system/dns.py | 68
-rw-r--r-- lib/ansible/module_utils/facts/system/env.py | 39
-rw-r--r-- lib/ansible/module_utils/facts/system/fips.py | 39
-rw-r--r-- lib/ansible/module_utils/facts/system/loadavg.py | 31
-rw-r--r-- lib/ansible/module_utils/facts/system/local.py | 113
-rw-r--r-- lib/ansible/module_utils/facts/system/lsb.py | 108
-rw-r--r-- lib/ansible/module_utils/facts/system/pkg_mgr.py | 165
-rw-r--r-- lib/ansible/module_utils/facts/system/platform.py | 99
-rw-r--r-- lib/ansible/module_utils/facts/system/python.py | 62
-rw-r--r-- lib/ansible/module_utils/facts/system/selinux.py | 93
-rw-r--r-- lib/ansible/module_utils/facts/system/service_mgr.py | 152
-rw-r--r-- lib/ansible/module_utils/facts/system/ssh_pub_keys.py | 56
-rw-r--r-- lib/ansible/module_utils/facts/system/user.py | 55
-rw-r--r-- lib/ansible/module_utils/facts/timeout.py | 70
-rw-r--r-- lib/ansible/module_utils/facts/utils.py | 102
-rw-r--r-- lib/ansible/module_utils/facts/virtual/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/facts/virtual/base.py | 80
-rw-r--r-- lib/ansible/module_utils/facts/virtual/dragonfly.py | 25
-rw-r--r-- lib/ansible/module_utils/facts/virtual/freebsd.py | 79
-rw-r--r-- lib/ansible/module_utils/facts/virtual/hpux.py | 72
-rw-r--r-- lib/ansible/module_utils/facts/virtual/linux.py | 405
-rw-r--r-- lib/ansible/module_utils/facts/virtual/netbsd.py | 73
-rw-r--r-- lib/ansible/module_utils/facts/virtual/openbsd.py | 74
-rw-r--r-- lib/ansible/module_utils/facts/virtual/sunos.py | 139
-rw-r--r-- lib/ansible/module_utils/facts/virtual/sysctl.py | 112
-rw-r--r-- lib/ansible/module_utils/json_utils.py | 79
-rw-r--r-- lib/ansible/module_utils/parsing/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/parsing/convert_bool.py | 29
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1 | 398
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.ArgvParser.psm1 | 78
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Backup.psm1 | 34
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1 | 69
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1 | 107
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1 | 66
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 | 390
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1 | 464
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.PrivilegeUtil.psm1 | 83
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.SID.psm1 | 99
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1 | 530
-rw-r--r-- lib/ansible/module_utils/powershell/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/pycompat24.py | 91
-rw-r--r-- lib/ansible/module_utils/service.py | 274
-rw-r--r-- lib/ansible/module_utils/six/__init__.py | 1009
-rw-r--r-- lib/ansible/module_utils/splitter.py | 219
-rw-r--r-- lib/ansible/module_utils/urls.py | 2070
-rw-r--r-- lib/ansible/module_utils/yumdnf.py | 182
-rw-r--r-- lib/ansible/modules/__init__.py | 0
-rw-r--r-- lib/ansible/modules/_include.py | 80
-rw-r--r-- lib/ansible/modules/add_host.py | 115
-rw-r--r-- lib/ansible/modules/apt.py | 1487
-rw-r--r-- lib/ansible/modules/apt_key.py | 530
-rw-r--r-- lib/ansible/modules/apt_repository.py | 735
-rw-r--r-- lib/ansible/modules/assemble.py | 280
-rw-r--r-- lib/ansible/modules/assert.py | 105
-rw-r--r-- lib/ansible/modules/async_status.py | 166
-rw-r--r-- lib/ansible/modules/async_wrapper.py | 350
-rw-r--r-- lib/ansible/modules/blockinfile.py | 387
-rw-r--r-- lib/ansible/modules/command.py | 352
-rw-r--r-- lib/ansible/modules/copy.py | 825
-rw-r--r-- lib/ansible/modules/cron.py | 765
-rw-r--r-- lib/ansible/modules/debconf.py | 231
-rw-r--r-- lib/ansible/modules/debug.py | 99
-rw-r--r-- lib/ansible/modules/dnf.py | 1468
-rw-r--r-- lib/ansible/modules/dpkg_selections.py | 90
-rw-r--r-- lib/ansible/modules/expect.py | 258
-rw-r--r-- lib/ansible/modules/fail.py | 63
-rw-r--r-- lib/ansible/modules/fetch.py | 124
-rw-r--r-- lib/ansible/modules/file.py | 987
-rw-r--r-- lib/ansible/modules/find.py | 534
-rw-r--r-- lib/ansible/modules/gather_facts.py | 64
-rw-r--r-- lib/ansible/modules/get_url.py | 706
-rw-r--r-- lib/ansible/modules/getent.py | 200
-rw-r--r-- lib/ansible/modules/git.py | 1418
-rw-r--r-- lib/ansible/modules/group.py | 662
-rw-r--r-- lib/ansible/modules/group_by.py | 89
-rw-r--r-- lib/ansible/modules/hostname.py | 908
-rw-r--r-- lib/ansible/modules/import_playbook.py | 77
-rw-r--r-- lib/ansible/modules/import_role.py | 110
-rw-r--r-- lib/ansible/modules/import_tasks.py | 77
-rw-r--r-- lib/ansible/modules/include_role.py | 139
-rw-r--r-- lib/ansible/modules/include_tasks.py | 99
-rw-r--r-- lib/ansible/modules/include_vars.py | 196
-rw-r--r-- lib/ansible/modules/iptables.py | 916
-rw-r--r-- lib/ansible/modules/known_hosts.py | 365
-rw-r--r-- lib/ansible/modules/lineinfile.py | 638
-rw-r--r-- lib/ansible/modules/meta.py | 123
-rw-r--r-- lib/ansible/modules/package.py | 87
-rw-r--r-- lib/ansible/modules/package_facts.py | 552
-rw-r--r-- lib/ansible/modules/pause.py | 117
-rw-r--r-- lib/ansible/modules/ping.py | 89
-rw-r--r-- lib/ansible/modules/pip.py | 832
-rw-r--r-- lib/ansible/modules/raw.py | 88
-rw-r--r-- lib/ansible/modules/reboot.py | 137
-rw-r--r-- lib/ansible/modules/replace.py | 316
-rw-r--r-- lib/ansible/modules/rpm_key.py | 253
-rw-r--r-- lib/ansible/modules/script.py | 108
-rw-r--r-- lib/ansible/modules/service.py | 1699
-rw-r--r-- lib/ansible/modules/service_facts.py | 411
-rw-r--r-- lib/ansible/modules/set_fact.py | 120
-rw-r--r-- lib/ansible/modules/set_stats.py | 82
-rw-r--r-- lib/ansible/modules/setup.py | 230
-rw-r--r-- lib/ansible/modules/shell.py | 205
-rw-r--r-- lib/ansible/modules/slurp.py | 123
-rw-r--r-- lib/ansible/modules/stat.py | 560
-rw-r--r-- lib/ansible/modules/subversion.py | 393
-rw-r--r-- lib/ansible/modules/systemd.py | 569
-rw-r--r-- lib/ansible/modules/systemd_service.py | 569
-rw-r--r-- lib/ansible/modules/sysvinit.py | 364
-rw-r--r-- lib/ansible/modules/tempfile.py | 124
-rw-r--r-- lib/ansible/modules/template.py | 111
-rw-r--r-- lib/ansible/modules/unarchive.py | 1115
-rw-r--r-- lib/ansible/modules/uri.py | 779
-rw-r--r-- lib/ansible/modules/user.py | 3253
-rw-r--r-- lib/ansible/modules/validate_argument_spec.py | 118
-rw-r--r-- lib/ansible/modules/wait_for.py | 689
-rw-r--r-- lib/ansible/modules/wait_for_connection.py | 121
-rw-r--r-- lib/ansible/modules/yum.py | 1818
-rw-r--r-- lib/ansible/modules/yum_repository.py | 735
-rw-r--r-- lib/ansible/parsing/__init__.py | 20
-rw-r--r-- lib/ansible/parsing/ajson.py | 42
-rw-r--r-- lib/ansible/parsing/dataloader.py | 468
-rw-r--r-- lib/ansible/parsing/mod_args.py | 345
-rw-r--r-- lib/ansible/parsing/plugin_docs.py | 227
-rw-r--r-- lib/ansible/parsing/quoting.py | 31
-rw-r--r-- lib/ansible/parsing/splitter.py | 286
-rw-r--r-- lib/ansible/parsing/utils/__init__.py | 20
-rw-r--r-- lib/ansible/parsing/utils/addresses.py | 216
-rw-r--r-- lib/ansible/parsing/utils/jsonify.py | 38
-rw-r--r-- lib/ansible/parsing/utils/yaml.py | 84
-rw-r--r-- lib/ansible/parsing/vault/__init__.py | 1289
-rw-r--r-- lib/ansible/parsing/yaml/__init__.py | 20
-rw-r--r-- lib/ansible/parsing/yaml/constructor.py | 178
-rw-r--r-- lib/ansible/parsing/yaml/dumper.py | 122
-rw-r--r-- lib/ansible/parsing/yaml/loader.py | 45
-rw-r--r-- lib/ansible/parsing/yaml/objects.py | 365
-rw-r--r-- lib/ansible/playbook/__init__.py | 117
-rw-r--r-- lib/ansible/playbook/attribute.py | 202
-rw-r--r-- lib/ansible/playbook/base.py | 775
-rw-r--r-- lib/ansible/playbook/block.py | 446
-rw-r--r-- lib/ansible/playbook/collectionsearch.py | 63
-rw-r--r-- lib/ansible/playbook/conditional.py | 221
-rw-r--r-- lib/ansible/playbook/handler.py | 62
-rw-r--r-- lib/ansible/playbook/handler_task_include.py | 39
-rw-r--r-- lib/ansible/playbook/helpers.py | 353
-rw-r--r-- lib/ansible/playbook/included_file.py | 223
-rw-r--r-- lib/ansible/playbook/loop_control.py | 41
-rw-r--r-- lib/ansible/playbook/play.py | 401
-rw-r--r-- lib/ansible/playbook/play_context.py | 354
-rw-r--r-- lib/ansible/playbook/playbook_include.py | 171
-rw-r--r-- lib/ansible/playbook/role/__init__.py | 664
-rw-r--r-- lib/ansible/playbook/role/definition.py | 240
-rw-r--r-- lib/ansible/playbook/role/include.py | 57
-rw-r--r-- lib/ansible/playbook/role/metadata.py | 130
-rw-r--r-- lib/ansible/playbook/role/requirement.py | 128
-rw-r--r-- lib/ansible/playbook/role_include.py | 185
-rw-r--r-- lib/ansible/playbook/taggable.py | 89
-rw-r--r-- lib/ansible/playbook/task.py | 511
-rw-r--r-- lib/ansible/playbook/task_include.py | 150
-rw-r--r-- lib/ansible/plugins/__init__.py | 143
-rw-r--r-- lib/ansible/plugins/action/__init__.py | 1431
-rw-r--r-- lib/ansible/plugins/action/add_host.py | 98
-rw-r--r-- lib/ansible/plugins/action/assemble.py | 166
-rw-r--r-- lib/ansible/plugins/action/assert.py | 94
-rw-r--r-- lib/ansible/plugins/action/async_status.py | 53
-rw-r--r-- lib/ansible/plugins/action/command.py | 28
-rw-r--r-- lib/ansible/plugins/action/copy.py | 599
-rw-r--r-- lib/ansible/plugins/action/debug.py | 80
-rw-r--r-- lib/ansible/plugins/action/fail.py | 43
-rw-r--r-- lib/ansible/plugins/action/fetch.py | 207
-rw-r--r-- lib/ansible/plugins/action/gather_facts.py | 152
-rw-r--r-- lib/ansible/plugins/action/group_by.py | 51
-rw-r--r-- lib/ansible/plugins/action/include_vars.py | 290
-rw-r--r-- lib/ansible/plugins/action/normal.py | 59
-rw-r--r-- lib/ansible/plugins/action/package.py | 96
-rw-r--r-- lib/ansible/plugins/action/pause.py | 311
-rw-r--r-- lib/ansible/plugins/action/raw.py | 50
-rw-r--r-- lib/ansible/plugins/action/reboot.py | 465
-rw-r--r-- lib/ansible/plugins/action/script.py | 160
-rw-r--r-- lib/ansible/plugins/action/service.py | 103
-rw-r--r-- lib/ansible/plugins/action/set_fact.py | 68
-rw-r--r-- lib/ansible/plugins/action/set_stats.py | 77
-rw-r--r-- lib/ansible/plugins/action/shell.py | 27
-rw-r--r-- lib/ansible/plugins/action/template.py | 190
-rw-r--r-- lib/ansible/plugins/action/unarchive.py | 111
-rw-r--r-- lib/ansible/plugins/action/uri.py | 94
-rw-r--r-- lib/ansible/plugins/action/validate_argument_spec.py | 94
-rw-r--r-- lib/ansible/plugins/action/wait_for_connection.py | 120
-rw-r--r-- lib/ansible/plugins/action/yum.py | 109
-rw-r--r-- lib/ansible/plugins/become/__init__.py | 108
-rw-r--r-- lib/ansible/plugins/become/runas.py | 75
-rw-r--r-- lib/ansible/plugins/become/su.py | 168
-rw-r--r-- lib/ansible/plugins/become/sudo.py | 121
-rw-r--r-- lib/ansible/plugins/cache/__init__.py | 375
-rw-r--r-- lib/ansible/plugins/cache/base.py | 21
-rw-r--r-- lib/ansible/plugins/cache/jsonfile.py | 64
-rw-r--r-- lib/ansible/plugins/cache/memory.py | 53
-rw-r--r-- lib/ansible/plugins/callback/__init__.py | 610
-rw-r--r-- lib/ansible/plugins/callback/default.py | 409
-rw-r--r-- lib/ansible/plugins/callback/junit.py | 364
-rw-r--r-- lib/ansible/plugins/callback/minimal.py | 80
-rw-r--r-- lib/ansible/plugins/callback/oneline.py | 77
-rw-r--r-- lib/ansible/plugins/callback/tree.py | 86
-rw-r--r-- lib/ansible/plugins/cliconf/__init__.py | 477
-rw-r--r-- lib/ansible/plugins/connection/__init__.py | 382
-rw-r--r-- lib/ansible/plugins/connection/local.py | 194
-rw-r--r-- lib/ansible/plugins/connection/paramiko_ssh.py | 695
-rw-r--r-- lib/ansible/plugins/connection/psrp.py | 898
-rw-r--r-- lib/ansible/plugins/connection/ssh.py | 1399
-rw-r--r-- lib/ansible/plugins/connection/winrm.py | 755
-rw-r--r-- lib/ansible/plugins/doc_fragments/__init__.py | 0
-rw-r--r-- lib/ansible/plugins/doc_fragments/action_common_attributes.py | 71
-rw-r--r-- lib/ansible/plugins/doc_fragments/action_core.py | 80
-rw-r--r-- lib/ansible/plugins/doc_fragments/backup.py | 20
-rw-r--r-- lib/ansible/plugins/doc_fragments/connection_pipelining.py | 31
-rw-r--r-- lib/ansible/plugins/doc_fragments/constructed.py | 83
-rw-r--r-- lib/ansible/plugins/doc_fragments/decrypt.py | 20
-rw-r--r-- lib/ansible/plugins/doc_fragments/default_callback.py | 93
-rw-r--r-- lib/ansible/plugins/doc_fragments/files.py | 91
-rw-r--r-- lib/ansible/plugins/doc_fragments/inventory_cache.py | 80
-rw-r--r-- lib/ansible/plugins/doc_fragments/result_format_callback.py | 50
-rw-r--r-- lib/ansible/plugins/doc_fragments/return_common.py | 42
-rw-r--r-- lib/ansible/plugins/doc_fragments/shell_common.py | 98
-rw-r--r-- lib/ansible/plugins/doc_fragments/shell_windows.py | 51
-rw-r--r-- lib/ansible/plugins/doc_fragments/template_common.py | 121
-rw-r--r-- lib/ansible/plugins/doc_fragments/url.py | 75
-rw-r--r-- lib/ansible/plugins/doc_fragments/url_windows.py | 150
-rw-r--r-- lib/ansible/plugins/doc_fragments/validate.py | 21
-rw-r--r-- lib/ansible/plugins/doc_fragments/vars_plugin_staging.py | 24
-rw-r--r-- lib/ansible/plugins/filter/__init__.py | 14
-rw-r--r-- lib/ansible/plugins/filter/b64decode.yml | 29
-rw-r--r-- lib/ansible/plugins/filter/b64encode.yml | 25
-rw-r--r-- lib/ansible/plugins/filter/basename.yml | 24
-rw-r--r-- lib/ansible/plugins/filter/bool.yml | 28
-rw-r--r-- lib/ansible/plugins/filter/checksum.yml | 21
-rw-r--r-- lib/ansible/plugins/filter/combinations.yml | 26
-rw-r--r-- lib/ansible/plugins/filter/combine.yml | 44
-rw-r--r-- lib/ansible/plugins/filter/comment.yml | 60
-rw-r--r-- lib/ansible/plugins/filter/core.py | 658
-rw-r--r-- lib/ansible/plugins/filter/dict2items.yml | 45
-rw-r--r-- lib/ansible/plugins/filter/difference.yml | 35
-rw-r--r-- lib/ansible/plugins/filter/dirname.yml | 24
-rw-r--r-- lib/ansible/plugins/filter/encryption.py | 82
-rw-r--r-- lib/ansible/plugins/filter/expanduser.yml | 21
-rw-r--r-- lib/ansible/plugins/filter/expandvars.yml | 21
-rw-r--r-- lib/ansible/plugins/filter/extract.yml | 39
-rw-r--r-- lib/ansible/plugins/filter/fileglob.yml | 22
-rw-r--r-- lib/ansible/plugins/filter/flatten.yml | 32
-rw-r--r-- lib/ansible/plugins/filter/from_json.yml | 25
-rw-r--r-- lib/ansible/plugins/filter/from_yaml.yml | 25
-rw-r--r-- lib/ansible/plugins/filter/from_yaml_all.yml | 28
-rw-r--r-- lib/ansible/plugins/filter/hash.yml | 28
-rw-r--r-- lib/ansible/plugins/filter/human_readable.yml | 35
-rw-r--r-- lib/ansible/plugins/filter/human_to_bytes.yml | 34
-rw-r--r-- lib/ansible/plugins/filter/intersect.yml | 35
-rw-r--r-- lib/ansible/plugins/filter/items2dict.yml | 48
-rw-r--r-- lib/ansible/plugins/filter/log.yml | 33
-rw-r--r-- lib/ansible/plugins/filter/mandatory.yml | 21
-rw-r--r-- lib/ansible/plugins/filter/mathstuff.py | 252
-rw-r--r-- lib/ansible/plugins/filter/md5.yml | 24
-rw-r--r-- lib/ansible/plugins/filter/password_hash.yml | 37
-rw-r--r-- lib/ansible/plugins/filter/path_join.yml | 30
-rw-r--r-- lib/ansible/plugins/filter/permutations.yml | 26
-rw-r--r-- lib/ansible/plugins/filter/pow.yml | 34
-rw-r--r-- lib/ansible/plugins/filter/product.yml | 42
-rw-r--r-- lib/ansible/plugins/filter/quote.yml | 23
-rw-r--r-- lib/ansible/plugins/filter/random.yml | 35
-rw-r--r-- lib/ansible/plugins/filter/realpath.yml | 21
-rw-r--r-- lib/ansible/plugins/filter/regex_escape.yml | 29
-rw-r--r-- lib/ansible/plugins/filter/regex_findall.yml | 37
-rw-r--r-- lib/ansible/plugins/filter/regex_replace.yml | 46
-rw-r--r-- lib/ansible/plugins/filter/regex_search.yml | 38
-rw-r--r-- lib/ansible/plugins/filter/rekey_on_member.yml | 30
-rw-r--r-- lib/ansible/plugins/filter/relpath.yml | 28
-rw-r--r-- lib/ansible/plugins/filter/root.yml | 32
-rw-r--r-- lib/ansible/plugins/filter/sha1.yml | 24
-rw-r--r-- lib/ansible/plugins/filter/shuffle.yml | 27
-rw-r--r-- lib/ansible/plugins/filter/split.yml | 32
-rw-r--r-- lib/ansible/plugins/filter/splitext.yml | 30
-rw-r--r-- lib/ansible/plugins/filter/strftime.yml | 45
-rw-r--r-- lib/ansible/plugins/filter/subelements.yml | 38
-rw-r--r-- lib/ansible/plugins/filter/symmetric_difference.yml | 35
-rw-r--r-- lib/ansible/plugins/filter/ternary.yml | 44
-rw-r--r-- lib/ansible/plugins/filter/to_datetime.yml | 35
-rw-r--r-- lib/ansible/plugins/filter/to_json.yml | 69
-rw-r--r-- lib/ansible/plugins/filter/to_nice_json.yml | 54
-rw-r--r-- lib/ansible/plugins/filter/to_nice_yaml.yml | 39
-rw-r--r-- lib/ansible/plugins/filter/to_uuid.yml | 30
-rw-r--r-- lib/ansible/plugins/filter/to_yaml.yml | 52
-rw-r--r-- lib/ansible/plugins/filter/type_debug.yml | 20
-rw-r--r-- lib/ansible/plugins/filter/union.yml | 35
-rw-r--r-- lib/ansible/plugins/filter/unique.yml | 30
-rw-r--r-- lib/ansible/plugins/filter/unvault.yml | 36
-rw-r--r-- lib/ansible/plugins/filter/urldecode.yml | 48
-rw-r--r-- lib/ansible/plugins/filter/urls.py | 20
-rw-r--r-- lib/ansible/plugins/filter/urlsplit.py | 87
-rw-r--r-- lib/ansible/plugins/filter/vault.yml | 48
-rw-r--r-- lib/ansible/plugins/filter/win_basename.yml | 24
-rw-r--r-- lib/ansible/plugins/filter/win_dirname.yml | 24
-rw-r--r-- lib/ansible/plugins/filter/win_splitdrive.yml | 29
-rw-r--r-- lib/ansible/plugins/filter/zip.yml | 43
-rw-r--r-- lib/ansible/plugins/filter/zip_longest.yml | 36
-rw-r--r-- lib/ansible/plugins/httpapi/__init__.py | 87
-rw-r--r-- lib/ansible/plugins/inventory/__init__.py | 463
-rw-r--r-- lib/ansible/plugins/inventory/advanced_host_list.py | 63
-rw-r--r-- lib/ansible/plugins/inventory/auto.py | 63
-rw-r--r-- lib/ansible/plugins/inventory/constructed.py | 177
-rw-r--r-- lib/ansible/plugins/inventory/generator.py | 135
-rw-r--r-- lib/ansible/plugins/inventory/host_list.py | 66
-rw-r--r-- lib/ansible/plugins/inventory/ini.py | 393
-rw-r--r-- lib/ansible/plugins/inventory/script.py | 196
-rw-r--r-- lib/ansible/plugins/inventory/toml.py | 298
-rw-r--r-- lib/ansible/plugins/inventory/yaml.py | 183
-rw-r--r-- lib/ansible/plugins/list.py | 210
-rw-r--r-- lib/ansible/plugins/loader.py | 1622
-rw-r--r-- lib/ansible/plugins/lookup/__init__.py | 130
-rw-r--r-- lib/ansible/plugins/lookup/config.py | 156
-rw-r--r-- lib/ansible/plugins/lookup/csvfile.py | 181
-rw-r--r-- lib/ansible/plugins/lookup/dict.py | 77
-rw-r--r-- lib/ansible/plugins/lookup/env.py | 79
-rw-r--r-- lib/ansible/plugins/lookup/file.py | 88
-rw-r--r-- lib/ansible/plugins/lookup/fileglob.py | 84
-rw-r--r-- lib/ansible/plugins/lookup/first_found.py | 235
-rw-r--r-- lib/ansible/plugins/lookup/indexed_items.py | 52
-rw-r--r-- lib/ansible/plugins/lookup/ini.py | 204
-rw-r--r-- lib/ansible/plugins/lookup/inventory_hostnames.py | 53
-rw-r--r-- lib/ansible/plugins/lookup/items.py | 73
-rw-r--r-- lib/ansible/plugins/lookup/lines.py | 62
-rw-r--r-- lib/ansible/plugins/lookup/list.py | 45
-rw-r--r-- lib/ansible/plugins/lookup/nested.py | 85
-rw-r--r-- lib/ansible/plugins/lookup/password.py | 389
-rw-r--r-- lib/ansible/plugins/lookup/pipe.py | 76
-rw-r--r-- lib/ansible/plugins/lookup/random_choice.py | 53
-rw-r--r-- lib/ansible/plugins/lookup/sequence.py | 268
-rw-r--r-- lib/ansible/plugins/lookup/subelements.py | 169
-rw-r--r-- lib/ansible/plugins/lookup/template.py | 165
-rw-r--r-- lib/ansible/plugins/lookup/together.py | 68
-rw-r--r-- lib/ansible/plugins/lookup/unvault.py | 63
-rw-r--r-- lib/ansible/plugins/lookup/url.py | 264
-rw-r--r-- lib/ansible/plugins/lookup/varnames.py | 79
-rw-r--r-- lib/ansible/plugins/lookup/vars.py | 106
-rw-r--r-- lib/ansible/plugins/netconf/__init__.py | 375
-rw-r--r-- lib/ansible/plugins/shell/__init__.py | 239
-rw-r--r-- lib/ansible/plugins/shell/cmd.py | 57
-rw-r--r-- lib/ansible/plugins/shell/powershell.py | 287
-rw-r--r-- lib/ansible/plugins/shell/sh.py | 79
-rw-r--r-- lib/ansible/plugins/strategy/__init__.py | 1202
-rw-r--r-- lib/ansible/plugins/strategy/debug.py | 37
-rw-r--r-- lib/ansible/plugins/strategy/free.py | 303
-rw-r--r-- lib/ansible/plugins/strategy/host_pinned.py | 45
-rw-r--r-- lib/ansible/plugins/strategy/linear.py | 406
-rw-r--r-- lib/ansible/plugins/terminal/__init__.py | 133
-rw-r--r-- lib/ansible/plugins/test/__init__.py | 13
-rw-r--r-- lib/ansible/plugins/test/abs.yml | 23
-rw-r--r-- lib/ansible/plugins/test/all.yml | 23
-rw-r--r-- lib/ansible/plugins/test/any.yml | 23
-rw-r--r-- lib/ansible/plugins/test/change.yml | 22
-rw-r--r-- lib/ansible/plugins/test/changed.yml | 22
-rw-r--r-- lib/ansible/plugins/test/contains.yml | 49
-rw-r--r-- lib/ansible/plugins/test/core.py | 287
-rw-r--r-- lib/ansible/plugins/test/directory.yml | 21
-rw-r--r-- lib/ansible/plugins/test/exists.yml | 22
-rw-r--r-- lib/ansible/plugins/test/failed.yml | 23
-rw-r--r-- lib/ansible/plugins/test/failure.yml | 23
-rw-r--r-- lib/ansible/plugins/test/falsy.yml | 24
-rw-r--r-- lib/ansible/plugins/test/file.yml | 22
-rw-r--r-- lib/ansible/plugins/test/files.py | 48
-rw-r--r-- lib/ansible/plugins/test/finished.yml | 21
-rw-r--r-- lib/ansible/plugins/test/is_abs.yml | 23
-rw-r--r-- lib/ansible/plugins/test/is_dir.yml | 21
-rw-r--r-- lib/ansible/plugins/test/is_file.yml | 22
-rw-r--r-- lib/ansible/plugins/test/is_link.yml | 21
-rw-r--r-- lib/ansible/plugins/test/is_mount.yml | 22
-rw-r--r-- lib/ansible/plugins/test/is_same_file.yml | 24
-rw-r--r-- lib/ansible/plugins/test/isnan.yml | 20
-rw-r--r-- lib/ansible/plugins/test/issubset.yml | 28
-rw-r--r-- lib/ansible/plugins/test/issuperset.yml | 28
-rw-r--r-- lib/ansible/plugins/test/link.yml | 21
-rw-r--r-- lib/ansible/plugins/test/link_exists.yml | 21
-rw-r--r-- lib/ansible/plugins/test/match.yml | 32
-rw-r--r-- lib/ansible/plugins/test/mathstuff.py | 62
-rw-r--r-- lib/ansible/plugins/test/mount.yml | 22
-rw-r--r-- lib/ansible/plugins/test/nan.yml | 20
-rw-r--r-- lib/ansible/plugins/test/reachable.yml | 21
-rw-r--r-- lib/ansible/plugins/test/regex.yml | 37
-rw-r--r-- lib/ansible/plugins/test/same_file.yml | 24
-rw-r--r-- lib/ansible/plugins/test/search.yml | 33
-rw-r--r-- lib/ansible/plugins/test/skip.yml | 22
-rw-r--r-- lib/ansible/plugins/test/skipped.yml | 22
-rw-r--r-- lib/ansible/plugins/test/started.yml | 21
-rw-r--r-- lib/ansible/plugins/test/subset.yml | 28
-rw-r--r-- lib/ansible/plugins/test/succeeded.yml | 22
-rw-r--r-- lib/ansible/plugins/test/success.yml | 22
-rw-r--r-- lib/ansible/plugins/test/successful.yml | 22
-rw-r--r-- lib/ansible/plugins/test/superset.yml | 28
-rw-r--r-- lib/ansible/plugins/test/truthy.yml | 24
-rw-r--r-- lib/ansible/plugins/test/unreachable.yml | 21
-rw-r--r-- lib/ansible/plugins/test/uri.py | 46
-rw-r--r-- lib/ansible/plugins/test/uri.yml | 30
-rw-r--r-- lib/ansible/plugins/test/url.yml | 29
-rw-r--r-- lib/ansible/plugins/test/urn.yml | 21
-rw-r--r-- lib/ansible/plugins/test/vault_encrypted.yml | 19
-rw-r--r-- lib/ansible/plugins/test/version.yml | 82
-rw-r--r-- lib/ansible/plugins/test/version_compare.yml | 82
-rw-r--r-- lib/ansible/plugins/vars/__init__.py | 41
-rw-r--r-- lib/ansible/plugins/vars/host_group_vars.py | 116
-rw-r--r-- lib/ansible/release.py | 24
-rw-r--r-- lib/ansible/template/__init__.py | 1027
-rw-r--r-- lib/ansible/template/native_helpers.py | 144
-rw-r--r-- lib/ansible/template/template.py | 45
-rw-r--r-- lib/ansible/template/vars.py | 128
-rw-r--r-- lib/ansible/utils/__init__.py | 20
-rw-r--r-- lib/ansible/utils/_junit_xml.py | 267
-rw-r--r-- lib/ansible/utils/cmd_functions.py | 66
-rw-r--r-- lib/ansible/utils/collection_loader/__init__.py | 26
-rw-r--r-- lib/ansible/utils/collection_loader/_collection_config.py | 103
-rw-r--r-- lib/ansible/utils/collection_loader/_collection_finder.py | 1161
-rw-r--r-- lib/ansible/utils/collection_loader/_collection_meta.py | 32
-rw-r--r-- lib/ansible/utils/color.py | 112
-rw-r--r-- lib/ansible/utils/context_objects.py | 92
-rw-r--r-- lib/ansible/utils/display.py | 526
-rw-r--r-- lib/ansible/utils/encrypt.py | 272
-rw-r--r-- lib/ansible/utils/fqcn.py | 33
-rw-r--r-- lib/ansible/utils/galaxy.py | 107
-rw-r--r-- lib/ansible/utils/hashing.py | 89
-rw-r--r-- lib/ansible/utils/helpers.py | 51
-rw-r--r-- lib/ansible/utils/jsonrpc.py | 113
-rw-r--r-- lib/ansible/utils/listify.py | 46
-rw-r--r-- lib/ansible/utils/lock.py | 43
-rw-r--r-- lib/ansible/utils/multiprocessing.py | 17
-rw-r--r-- lib/ansible/utils/native_jinja.py | 13
-rw-r--r-- lib/ansible/utils/path.py | 161
-rw-r--r-- lib/ansible/utils/plugin_docs.py | 351
-rw-r--r-- lib/ansible/utils/py3compat.py | 70
-rw-r--r-- lib/ansible/utils/sentinel.py | 68
-rw-r--r-- lib/ansible/utils/shlex.py | 34
-rw-r--r-- lib/ansible/utils/singleton.py | 29
-rw-r--r-- lib/ansible/utils/ssh_functions.py | 66
-rw-r--r-- lib/ansible/utils/unicode.py | 33
-rw-r--r-- lib/ansible/utils/unsafe_proxy.py | 128
-rw-r--r-- lib/ansible/utils/vars.py | 293
-rw-r--r-- lib/ansible/utils/version.py | 272
-rw-r--r-- lib/ansible/vars/__init__.py | 0
-rw-r--r-- lib/ansible/vars/clean.py | 171
-rw-r--r-- lib/ansible/vars/fact_cache.py | 72
-rw-r--r-- lib/ansible/vars/hostvars.py | 155
-rw-r--r-- lib/ansible/vars/manager.py | 749
-rw-r--r-- lib/ansible/vars/plugins.py | 114
-rw-r--r-- lib/ansible/vars/reserved.py | 84
676 files changed, 143985 insertions, 0 deletions
diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py
new file mode 100644
index 0000000..e4905a1
--- /dev/null
+++ b/lib/ansible/__init__.py
@@ -0,0 +1,31 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# make vendored top-level modules accessible EARLY
+import ansible._vendor
+
+# Note: Do not add any code to this file. The ansible module may be
+# a namespace package when using Ansible 2.1+, so anything in this file may not
+# be available if one of the other packages in the namespace is loaded first.
+#
+# This is for backwards compat. Code should be ported to get these from
+# ansible.release instead of from here.
+from ansible.release import __version__, __author__
diff --git a/lib/ansible/__main__.py b/lib/ansible/__main__.py
new file mode 100644
index 0000000..5a753ec
--- /dev/null
+++ b/lib/ansible/__main__.py
@@ -0,0 +1,41 @@
+# Copyright: (c) 2021, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+import argparse
+import importlib
+import os
+import sys
+
+from importlib.metadata import distribution
+
+
+def _short_name(name):
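+    # a short illustrative mapping (derived from the transform below):
+    #   'ansible-playbook' -> 'playbook', 'ansible-config' -> 'config',
+    #   and the bare 'ansible' entry point -> 'adhoc'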
+ return name.removeprefix('ansible-').replace('ansible', 'adhoc')
+
+
+def main():
+ dist = distribution('ansible-core')
+ ep_map = {_short_name(ep.name): ep for ep in dist.entry_points if ep.group == 'console_scripts'}
+
+ parser = argparse.ArgumentParser(prog='python -m ansible', add_help=False)
+ parser.add_argument('entry_point', choices=list(ep_map) + ['test'])
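+    # e.g. `python -m ansible playbook site.yml` dispatches to the
+    # ansible-playbook console script ('site.yml' is just an example)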
+ args, extra = parser.parse_known_args()
+
+ if args.entry_point == 'test':
+ ansible_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ source_root = os.path.join(ansible_root, 'test', 'lib')
+
+ if os.path.exists(os.path.join(source_root, 'ansible_test', '_internal', '__init__.py')):
+ # running from source, use that version of ansible-test instead of any version that may already be installed
+ sys.path.insert(0, source_root)
+
+ module = importlib.import_module('ansible_test._util.target.cli.ansible_test_cli_stub')
+ main = module.main
+ else:
+ main = ep_map[args.entry_point].load()
+
+ main([args.entry_point] + extra)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/_vendor/__init__.py b/lib/ansible/_vendor/__init__.py
new file mode 100644
index 0000000..a31957b
--- /dev/null
+++ b/lib/ansible/_vendor/__init__.py
@@ -0,0 +1,46 @@
+# (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import pkgutil
+import sys
+import warnings
+
+# This package exists to host vendored top-level Python packages for downstream packaging. Any Python packages
+# installed beneath this one will be masked from the Ansible loader, and available from the front of sys.path.
+# It is expected that the vendored packages will be loaded very early, so a warning will be fired on import of
+# the top-level ansible package if any packages beneath this are already loaded at that point.
+#
+# Python packages may be installed here during downstream packaging using something like:
+# pip install --upgrade -t (path to this dir) cryptography pyyaml packaging jinja2
+
+# mask vendored content below this package from being accessed as an ansible subpackage
+__path__ = []
+
+
+def _ensure_vendored_path_entry():
+ """
+ Ensure that any downstream-bundled content beneath this package is available at the top of sys.path
+ """
+ # patch our vendored dir onto sys.path
+ vendored_path_entry = os.path.dirname(__file__)
+ vendored_module_names = set(m[1] for m in pkgutil.iter_modules([vendored_path_entry], '')) # m[1] == m.name
+
+ if vendored_module_names:
+ # patch us early to load vendored deps transparently
+ if vendored_path_entry in sys.path:
+ # handle reload case by removing the existing entry, wherever it might be
+ sys.path.remove(vendored_path_entry)
+ sys.path.insert(0, vendored_path_entry)
+
+ already_loaded_vendored_modules = set(sys.modules.keys()).intersection(vendored_module_names)
+
+ if already_loaded_vendored_modules:
+ warnings.warn('One or more Python packages bundled by this ansible-core distribution were already '
+ 'loaded ({0}). This may result in undefined behavior.'.format(', '.join(sorted(already_loaded_vendored_modules))))
+
+
+_ensure_vendored_path_entry()
diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
new file mode 100644
index 0000000..15ab5fe
--- /dev/null
+++ b/lib/ansible/cli/__init__.py
@@ -0,0 +1,689 @@
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import locale
+import os
+import sys
+
+# Used to check that the controller is running a new enough Python version;
+# we only restrict on our documented minimum versions
+if sys.version_info < (3, 9):
+ raise SystemExit(
+ 'ERROR: Ansible requires Python 3.9 or newer on the controller. '
+ 'Current version: %s' % ''.join(sys.version.splitlines())
+ )
+
+
+def check_blocking_io():
+ """Check stdin/stdout/stderr to make sure they are using blocking IO."""
+ handles = []
+
+ for handle in (sys.stdin, sys.stdout, sys.stderr):
+ # noinspection PyBroadException
+ try:
+ fd = handle.fileno()
+ except Exception:
+ continue # not a real file handle, such as during the import sanity test
+
+ if not os.get_blocking(fd):
+ handles.append(getattr(handle, 'name', None) or '#%s' % fd)
+
+ if handles:
+ raise SystemExit('ERROR: Ansible requires blocking IO on stdin/stdout/stderr. '
+ 'Non-blocking file handles detected: %s' % ', '.join(_io for _io in handles))
+
+
+check_blocking_io()
+
+
+def initialize_locale():
+ """Set the locale to the users default setting and ensure
+ the locale and filesystem encoding are UTF-8.
+ """
+ try:
+ locale.setlocale(locale.LC_ALL, '')
+ dummy, encoding = locale.getlocale()
+ except (locale.Error, ValueError) as e:
+ raise SystemExit(
+ 'ERROR: Ansible could not initialize the preferred locale: %s' % e
+ )
+
+ if not encoding or encoding.lower() not in ('utf-8', 'utf8'):
+ raise SystemExit('ERROR: Ansible requires the locale encoding to be UTF-8; Detected %s.' % encoding)
+
+ fs_enc = sys.getfilesystemencoding()
+ if fs_enc.lower() != 'utf-8':
+ raise SystemExit('ERROR: Ansible requires the filesystem encoding to be UTF-8; Detected %s.' % fs_enc)
+
+
+initialize_locale()
+
+
+from importlib.metadata import version
+from ansible.module_utils.compat.version import LooseVersion
+
+# Used to check that the controller has a new enough Jinja2 version;
+# we only restrict on our documented minimum versions
+jinja2_version = version('jinja2')
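+# note: version() returns a plain string (e.g. '3.1.2'); the comparison below
+# works because the vendored LooseVersion's reflected comparison coerces string operands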
+if jinja2_version < LooseVersion('3.0'):
+ raise SystemExit(
+ 'ERROR: Ansible requires Jinja2 3.0 or newer on the controller. '
+ 'Current version: %s' % jinja2_version
+ )
+
+import errno
+import getpass
+import subprocess
+import traceback
+from abc import ABC, abstractmethod
+from pathlib import Path
+
+try:
+ from ansible import constants as C
+ from ansible.utils.display import Display
+ display = Display()
+except Exception as e:
+ print('ERROR: %s' % e, file=sys.stderr)
+ sys.exit(5)
+
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
+from ansible.inventory.manager import InventoryManager
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.common.file import is_executable
+from ansible.parsing.dataloader import DataLoader
+from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
+from ansible.plugins.loader import add_all_plugin_dirs
+from ansible.release import __version__
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
+from ansible.utils.path import unfrackpath
+from ansible.utils.unsafe_proxy import to_unsafe_text
+from ansible.vars.manager import VariableManager
+
+try:
+ import argcomplete
+ HAS_ARGCOMPLETE = True
+except ImportError:
+ HAS_ARGCOMPLETE = False
+
+
+class CLI(ABC):
+ ''' code behind bin/ansible* programs '''
+
+ PAGER = 'less'
+
+ # -F (quit-if-one-screen) -R (allow raw ansi control chars)
+ # -S (chop long lines) -X (disable termcap init and de-init)
+ LESS_OPTS = 'FRSX'
+ SKIP_INVENTORY_DEFAULTS = False
+
+ def __init__(self, args, callback=None):
+ """
+ Base init method for all command line programs
+ """
+
+ if not args:
+ raise ValueError('A non-empty list for args is required')
+
+ self.args = args
+ self.parser = None
+ self.callback = callback
+
+ if C.DEVEL_WARNING and __version__.endswith('dev0'):
+ display.warning(
+ 'You are running the development version of Ansible. You should only run Ansible from "devel" if '
+ 'you are modifying the Ansible engine, or trying out features under development. This is a rapidly '
+ 'changing source of code and can become unstable at any point.'
+ )
+
+ @abstractmethod
+ def run(self):
+ """Run the ansible command
+
+ Subclasses must implement this method. It does the actual work of
+ running an Ansible command.
+ """
+ self.parse()
+
+ display.vv(to_text(opt_help.version(self.parser.prog)))
+
+ if C.CONFIG_FILE:
+ display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
+ else:
+ display.v(u"No config file found; using defaults")
+
+ # warn about deprecated config options
+ for deprecated in C.config.DEPRECATED:
+ name = deprecated[0]
+ why = deprecated[1]['why']
+ if 'alternatives' in deprecated[1]:
+ alt = ', use %s instead' % deprecated[1]['alternatives']
+ else:
+ alt = ''
+ ver = deprecated[1].get('version')
+ date = deprecated[1].get('date')
+ collection_name = deprecated[1].get('collection_name')
+ display.deprecated("%s option, %s%s" % (name, why, alt),
+ version=ver, date=date, collection_name=collection_name)
+
+ @staticmethod
+ def split_vault_id(vault_id):
+ # return (before_@, after_@)
+ # if no @, return whole string as after_
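+        # e.g. 'dev@~/.vault_pass' -> ('dev', '~/.vault_pass'); 'prompt' -> (None, 'prompt')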
+ if '@' not in vault_id:
+ return (None, vault_id)
+
+ parts = vault_id.split('@', 1)
+ ret = tuple(parts)
+ return ret
+
+ @staticmethod
+ def build_vault_ids(vault_ids, vault_password_files=None,
+ ask_vault_pass=None, create_new_password=None,
+ auto_prompt=True):
+ vault_password_files = vault_password_files or []
+ vault_ids = vault_ids or []
+
+ # convert vault_password_files into vault_ids slugs
+ for password_file in vault_password_files:
+ id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
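+            # yields slugs like 'default@/path/to/vault_pass'
+            # (C.DEFAULT_VAULT_IDENTITY is 'default' unless configured otherwise)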
+
+ # note this makes --vault-id higher precedence than --vault-password-file
+ # if we want to intertwingle them in order probably need a cli callback to populate vault_ids
+ # used by --vault-id and --vault-password-file
+ vault_ids.append(id_slug)
+
+        # if an action needs an encryption password (create_new_password=True) and we don't
+        # have other secrets set up, then automatically add a password prompt as well.
+        # prompts can't/shouldn't work without a tty, so don't add prompt secrets
+ if ask_vault_pass or (not vault_ids and auto_prompt):
+
+ id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
+ vault_ids.append(id_slug)
+
+ return vault_ids
+
+ # TODO: remove the now unused args
+ @staticmethod
+ def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
+ ask_vault_pass=None, create_new_password=False,
+ auto_prompt=True):
+ # list of tuples
+ vault_secrets = []
+
+        # Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
+        # we need to show different prompts. This is for compat with older Towers that expect a
+        # certain vault password prompt format, so the 'prompt_ask_vault_pass' vault_id gets the old format.
+ prompt_formats = {}
+
+ # If there are configured default vault identities, they are considered 'first'
+ # so we prepend them to vault_ids (from cli) here
+
+ vault_password_files = vault_password_files or []
+ if C.DEFAULT_VAULT_PASSWORD_FILE:
+ vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
+
+ if create_new_password:
+ prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
+ 'Confirm new vault password (%(vault_id)s): ']
+ # 2.3 format prompts for --ask-vault-pass
+ prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
+ 'Confirm New Vault password: ']
+ else:
+ prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
+ # The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
+ prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
+
+ vault_ids = CLI.build_vault_ids(vault_ids,
+ vault_password_files,
+ ask_vault_pass,
+ create_new_password,
+ auto_prompt=auto_prompt)
+
+ last_exception = found_vault_secret = None
+ for vault_id_slug in vault_ids:
+ vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
+ if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
+
+ # --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
+ # confusing since it will use the old format without the vault id in the prompt
+ built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
+
+ # choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
+ # always gets the old format for Tower compatibility.
+ # ie, we used --ask-vault-pass, so we need to use the old vault password prompt
+ # format since Tower needs to match on that format.
+ prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
+ vault_id=built_vault_id)
+
+                # an empty or invalid password from the prompt warns and then re-raises;
+                # unlike the password-file branch below, a failed prompt aborts the run
+ try:
+ prompted_vault_secret.load()
+ except AnsibleError as exc:
+ display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
+ raise
+
+ found_vault_secret = True
+ vault_secrets.append((built_vault_id, prompted_vault_secret))
+
+ # update loader with new secrets incrementally, so we can load a vault password
+ # that is encrypted with a vault secret provided earlier
+ loader.set_vault_secrets(vault_secrets)
+ continue
+
+ # assuming anything else is a password file
+ display.vvvvv('Reading vault password file: %s' % vault_id_value)
+ # read vault_pass from a file
+ try:
+ file_vault_secret = get_file_vault_secret(filename=vault_id_value,
+ vault_id=vault_id_name,
+ loader=loader)
+ except AnsibleError as exc:
+ display.warning('Error getting vault password file (%s): %s' % (vault_id_name, to_text(exc)))
+ last_exception = exc
+ continue
+
+ try:
+ file_vault_secret.load()
+ except AnsibleError as exc:
+ display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, to_text(exc)))
+ last_exception = exc
+ continue
+
+ found_vault_secret = True
+ if vault_id_name:
+ vault_secrets.append((vault_id_name, file_vault_secret))
+ else:
+ vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
+
+            # update the loader with all vault secrets known so far
+ loader.set_vault_secrets(vault_secrets)
+
+ # An invalid or missing password file will error globally
+ # if no valid vault secret was found.
+ if last_exception and not found_vault_secret:
+ raise last_exception
+
+ return vault_secrets
+
+ @staticmethod
+ def _get_secret(prompt):
+
+ secret = getpass.getpass(prompt=prompt)
+ if secret:
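+            # wrap as unsafe text so the secret is never treated as a Jinja2 template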
+ secret = to_unsafe_text(secret)
+ return secret
+
+ @staticmethod
+ def ask_passwords():
+ ''' prompt for connection and become passwords if needed '''
+
+ op = context.CLIARGS
+ sshpass = None
+ becomepass = None
+ become_prompt = ''
+
+ become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()
+
+ try:
+ become_prompt = "%s password: " % become_prompt_method
+ if op['ask_pass']:
+ sshpass = CLI._get_secret("SSH password: ")
+ become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
+ elif op['connection_password_file']:
+ sshpass = CLI.get_password_from_file(op['connection_password_file'])
+
+ if op['become_ask_pass']:
+ becomepass = CLI._get_secret(become_prompt)
+ if op['ask_pass'] and becomepass == '':
+ becomepass = sshpass
+ elif op['become_password_file']:
+ becomepass = CLI.get_password_from_file(op['become_password_file'])
+
+ except EOFError:
+ pass
+
+ return (sshpass, becomepass)
+
+ def validate_conflicts(self, op, runas_opts=False, fork_opts=False):
+ ''' check for conflicting options '''
+
+ if fork_opts:
+ if op.forks < 1:
+ self.parser.error("The number of processes (--forks) must be >= 1")
+
+ return op
+
+ @abstractmethod
+ def init_parser(self, usage="", desc=None, epilog=None):
+ """
+ Create an options parser for most ansible scripts
+
+ Subclasses need to implement this method. They will usually call the base class's
+ init_parser to create a basic version and then add their own options on top of that.
+
+ An implementation will look something like this::
+
+ def init_parser(self):
+ super(MyCLI, self).init_parser(usage="My Ansible CLI", inventory_opts=True)
+ ansible.arguments.option_helpers.add_runas_options(self.parser)
+ self.parser.add_option('--my-option', dest='my_option', action='store')
+ """
+ self.parser = opt_help.create_base_parser(self.name, usage=usage, desc=desc, epilog=epilog)
+
+ @abstractmethod
+ def post_process_args(self, options):
+ """Process the command line args
+
+ Subclasses need to implement this method. This method validates and transforms the command
+ line arguments. It can be used to check whether conflicting values were given, whether filenames
+ exist, etc.
+
+ An implementation will look something like this::
+
+ def post_process_args(self, options):
+ options = super(MyCLI, self).post_process_args(options)
+ if options.addition and options.subtraction:
+ raise AnsibleOptionsError('Only one of --addition and --subtraction can be specified')
+ if isinstance(options.listofhosts, string_types):
+ options.listofhosts = string_types.split(',')
+ return options
+ """
+
+ # process tags
+ if hasattr(options, 'tags') and not options.tags:
+            # argparse defaults do not do what's expected here.
+ # More specifically, we want `--tags` to be additive. So we cannot
+ # simply change C.TAGS_RUN's default to ["all"] because then passing
+ # --tags foo would cause us to have ['all', 'foo']
+ options.tags = ['all']
+ if hasattr(options, 'tags') and options.tags:
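+            # each --tags value may itself be comma-separated; merge and de-duplicate,
+            # e.g. --tags foo,bar --tags baz -> ['foo', 'bar', 'baz'] (order not guaranteed)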
+ tags = set()
+ for tag_set in options.tags:
+ for tag in tag_set.split(u','):
+ tags.add(tag.strip())
+ options.tags = list(tags)
+
+ # process skip_tags
+ if hasattr(options, 'skip_tags') and options.skip_tags:
+ skip_tags = set()
+ for tag_set in options.skip_tags:
+ for tag in tag_set.split(u','):
+ skip_tags.add(tag.strip())
+ options.skip_tags = list(skip_tags)
+
+ # process inventory options except for CLIs that require their own processing
+ if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
+
+ if options.inventory:
+
+ # should always be list
+ if isinstance(options.inventory, string_types):
+ options.inventory = [options.inventory]
+
+ # Ensure full paths when needed
+ options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in options.inventory]
+ else:
+ options.inventory = C.DEFAULT_HOST_LIST
+
+ return options
+
+ def parse(self):
+ """Parse the command line args
+
+ This method parses the command line arguments. It uses the parser
+ stored in the self.parser attribute and saves the args and options in
+ context.CLIARGS.
+
+ Subclasses need to implement two helper methods, init_parser() and post_process_args() which
+ are called from this function before and after parsing the arguments.
+ """
+ self.init_parser()
+
+ if HAS_ARGCOMPLETE:
+ argcomplete.autocomplete(self.parser)
+
+ try:
+ options = self.parser.parse_args(self.args[1:])
+ except SystemExit as ex:
+ if ex.code != 0:
+ self.parser.exit(status=2, message=" \n%s" % self.parser.format_help())
+ raise
+ options = self.post_process_args(options)
+ context._init_global_context(options)
+
+ @staticmethod
+ def version_info(gitinfo=False):
+ ''' return full ansible version info '''
+ if gitinfo:
+            # expensive call, use with care
+ ansible_version_string = opt_help.version()
+ else:
+ ansible_version_string = __version__
+ ansible_version = ansible_version_string.split()[0]
+ ansible_versions = ansible_version.split('.')
+ for counter in range(len(ansible_versions)):
+ if ansible_versions[counter] == "":
+ ansible_versions[counter] = 0
+ try:
+ ansible_versions[counter] = int(ansible_versions[counter])
+ except Exception:
+ pass
+ if len(ansible_versions) < 3:
+ for counter in range(len(ansible_versions), 3):
+ ansible_versions.append(0)
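+        # Illustrative (assumed version string): for '2.14.0.dev0' this returns
+        # {'string': '2.14.0.dev0', 'full': '2.14.0.dev0', 'major': 2, 'minor': 14, 'revision': 0}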
+ return {'string': ansible_version_string.strip(),
+ 'full': ansible_version,
+ 'major': ansible_versions[0],
+ 'minor': ansible_versions[1],
+ 'revision': ansible_versions[2]}
+
+ @staticmethod
+ def pager(text):
+ ''' find reasonable way to display text '''
+ # this is a much simpler form of what is in pydoc.py
+ if not sys.stdout.isatty():
+ display.display(text, screen_only=True)
+ elif 'PAGER' in os.environ:
+ if sys.platform == 'win32':
+ display.display(text, screen_only=True)
+ else:
+ CLI.pager_pipe(text, os.environ['PAGER'])
+ else:
+ p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+ if p.returncode == 0:
+ CLI.pager_pipe(text, 'less')
+ else:
+ display.display(text, screen_only=True)
+
+ @staticmethod
+ def pager_pipe(text, cmd):
+ ''' pipe text through a pager '''
+ if 'LESS' not in os.environ:
+ os.environ['LESS'] = CLI.LESS_OPTS
+ try:
+ cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
+ cmd.communicate(input=to_bytes(text))
+ except IOError:
+ pass
+ except KeyboardInterrupt:
+ pass
+
+ @staticmethod
+ def _play_prereqs():
+ options = context.CLIARGS
+
+ # all needs loader
+ loader = DataLoader()
+
+ basedir = options.get('basedir', False)
+ if basedir:
+ loader.set_basedir(basedir)
+ add_all_plugin_dirs(basedir)
+ AnsibleCollectionConfig.playbook_paths = basedir
+ default_collection = _get_collection_name_from_path(basedir)
+ if default_collection:
+ display.warning(u'running with default collection {0}'.format(default_collection))
+ AnsibleCollectionConfig.default_collection = default_collection
+
+ vault_ids = list(options['vault_ids'])
+ default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
+ vault_ids = default_vault_ids + vault_ids
+
+ vault_secrets = CLI.setup_vault_secrets(loader,
+ vault_ids=vault_ids,
+ vault_password_files=list(options['vault_password_files']),
+ ask_vault_pass=options['ask_vault_pass'],
+ auto_prompt=False)
+ loader.set_vault_secrets(vault_secrets)
+
+ # create the inventory, and filter it based on the subset specified (if any)
+ inventory = InventoryManager(loader=loader, sources=options['inventory'], cache=(not options.get('flush_cache')))
+
+ # create the variable manager, which will be shared throughout
+ # the code, ensuring a consistent view of global variables
+ variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False))
+
+ return loader, inventory, variable_manager
+
+ @staticmethod
+ def get_host_list(inventory, subset, pattern='all'):
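+        # Illustrative (hypothetical pattern): get_host_list(inventory, 'web*', 'all')
+        # applies the --limit subset first, then matches the pattern; an empty match
+        # raises AnsibleError unless the inventory itself was empty (implicit localhost only).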
+
+ no_hosts = False
+ if len(inventory.list_hosts()) == 0:
+ # Empty inventory
+ if C.LOCALHOST_WARNING and pattern not in C.LOCALHOST:
+ display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
+ no_hosts = True
+
+ inventory.subset(subset)
+
+ hosts = inventory.list_hosts(pattern)
+ if not hosts and no_hosts is False:
+ raise AnsibleError("Specified inventory, host pattern and/or --limit leaves us with no hosts to target.")
+
+ return hosts
+
+ @staticmethod
+ def get_password_from_file(pwd_file):
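+        # Secrets come from one of three sources: '-' reads stdin, an executable file is
+        # run and its stdout used, and any other file is read verbatim. Illustrative
+        # (hypothetical file): 'pass.txt' containing 'hunter2\n' yields u'hunter2'.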
+
+ b_pwd_file = to_bytes(pwd_file)
+ secret = None
+ if b_pwd_file == b'-':
+            # ensure it's read as bytes
+ secret = sys.stdin.buffer.read()
+
+ elif not os.path.exists(b_pwd_file):
+ raise AnsibleError("The password file %s was not found" % pwd_file)
+
+ elif is_executable(b_pwd_file):
+ display.vvvv(u'The password file %s is a script.' % to_text(pwd_file))
+ cmd = [b_pwd_file]
+
+ try:
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except OSError as e:
+ raise AnsibleError("Problem occured when trying to run the password script %s (%s)."
+ " If this is not a script, remove the executable bit from the file." % (pwd_file, e))
+
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError("The password script %s returned an error (rc=%s): %s" % (pwd_file, p.returncode, stderr))
+ secret = stdout
+
+ else:
+ try:
+                with open(b_pwd_file, "rb") as f:
+                    secret = f.read().strip()
+ except (OSError, IOError) as e:
+ raise AnsibleError("Could not read password file %s: %s" % (pwd_file, e))
+
+ secret = secret.strip(b'\r\n')
+
+ if not secret:
+ raise AnsibleError('Empty password was provided from file (%s)' % pwd_file)
+
+ return to_unsafe_text(secret)
+
+ @classmethod
+ def cli_executor(cls, args=None):
+ if args is None:
+ args = sys.argv
+
+ try:
+ display.debug("starting run")
+
+ ansible_dir = Path(C.ANSIBLE_HOME).expanduser()
+ try:
+ ansible_dir.mkdir(mode=0o700)
+ except OSError as exc:
+ if exc.errno != errno.EEXIST:
+ display.warning(
+ "Failed to create the directory '%s': %s" % (ansible_dir, to_text(exc, errors='surrogate_or_replace'))
+ )
+ else:
+ display.debug("Created the '%s' directory" % ansible_dir)
+
+ try:
+ args = [to_text(a, errors='surrogate_or_strict') for a in args]
+ except UnicodeError:
+ display.error('Command line args are not in utf-8, unable to continue. Ansible currently only understands utf-8')
+ display.display(u"The full traceback was:\n\n%s" % to_text(traceback.format_exc()))
+ exit_code = 6
+ else:
+ cli = cls(args)
+ exit_code = cli.run()
+
+ except AnsibleOptionsError as e:
+ cli.parser.print_help()
+ display.error(to_text(e), wrap_text=False)
+ exit_code = 5
+ except AnsibleParserError as e:
+ display.error(to_text(e), wrap_text=False)
+ exit_code = 4
+ # TQM takes care of these, but leaving comment to reserve the exit codes
+ # except AnsibleHostUnreachable as e:
+ # display.error(str(e))
+ # exit_code = 3
+ # except AnsibleHostFailed as e:
+ # display.error(str(e))
+ # exit_code = 2
+ except AnsibleError as e:
+ display.error(to_text(e), wrap_text=False)
+ exit_code = 1
+ except KeyboardInterrupt:
+ display.error("User interrupted execution")
+ exit_code = 99
+ except Exception as e:
+ if C.DEFAULT_DEBUG:
+                # Show raw stacktraces in debug mode; this also allows pdb to
+                # enter post-mortem mode.
+ raise
+ have_cli_options = bool(context.CLIARGS)
+ display.error("Unexpected Exception, this is probably a bug: %s" % to_text(e), wrap_text=False)
+            if not have_cli_options or (have_cli_options and context.CLIARGS['verbosity'] > 2):
+ log_only = False
+ if hasattr(e, 'orig_exc'):
+ display.vvv('\nexception type: %s' % to_text(type(e.orig_exc)))
+ why = to_text(e.orig_exc)
+ if to_text(e) != why:
+ display.vvv('\noriginal msg: %s' % why)
+ else:
+ display.display("to see the full traceback, use -vvv")
+ log_only = True
+ display.display(u"the full traceback was:\n\n%s" % to_text(traceback.format_exc()), log_only=log_only)
+ exit_code = 250
+
+ sys.exit(exit_code)
diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py
new file mode 100755
index 0000000..e90b44c
--- /dev/null
+++ b/lib/ansible/cli/adhoc.py
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.module_utils._text import to_text
+from ansible.parsing.splitter import parse_kv
+from ansible.parsing.utils.yaml import from_yaml
+from ansible.playbook import Playbook
+from ansible.playbook.play import Play
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class AdHocCLI(CLI):
+ ''' is an extra-simple tool/framework/API for doing 'remote things'.
+    This command allows you to define and run a single task 'playbook' against a set of hosts.
+ '''
+
+ name = 'ansible'
+
+ def init_parser(self):
+ ''' create an options parser for bin/ansible '''
+ super(AdHocCLI, self).init_parser(usage='%prog <host-pattern> [options]',
+ desc="Define and run a single task 'playbook' against a set of hosts",
+ epilog="Some actions do not make sense in Ad-Hoc (include, meta, etc)")
+
+ opt_help.add_runas_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_async_options(self.parser)
+ opt_help.add_output_options(self.parser)
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_check_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_fork_options(self.parser)
+ opt_help.add_module_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+ opt_help.add_tasknoplay_options(self.parser)
+
+ # options unique to ansible ad-hoc
+ self.parser.add_argument('-a', '--args', dest='module_args',
+ help="The action's options in space separated k=v format: -a 'opt1=val1 opt2=val2' "
+ "or a json string: -a '{\"opt1\": \"val1\", \"opt2\": \"val2\"}'",
+ default=C.DEFAULT_MODULE_ARGS)
+ self.parser.add_argument('-m', '--module-name', dest='module_name',
+ help="Name of the action to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
+ default=C.DEFAULT_MODULE_NAME)
+ self.parser.add_argument('args', metavar='pattern', help='host pattern')
+
+ def post_process_args(self, options):
+ '''Post process and validate options for bin/ansible '''
+
+ options = super(AdHocCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options, runas_opts=True, fork_opts=True)
+
+ return options
+
+ def _play_ds(self, pattern, async_val, poll):
+ check_raw = context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS
+
+ module_args_raw = context.CLIARGS['module_args']
+ module_args = None
+ if module_args_raw and module_args_raw.startswith('{') and module_args_raw.endswith('}'):
+ try:
+ module_args = from_yaml(module_args_raw.strip(), json_only=True)
+ except AnsibleParserError:
+ pass
+
+ if not module_args:
+ module_args = parse_kv(module_args_raw, check_raw=check_raw)
+
+ mytask = {'action': {'module': context.CLIARGS['module_name'], 'args': module_args},
+ 'timeout': context.CLIARGS['task_timeout']}
+
+ # avoid adding to tasks that don't support it, unless set, then give user an error
+ if context.CLIARGS['module_name'] not in C._ACTION_ALL_INCLUDE_ROLE_TASKS and any(frozenset((async_val, poll))):
+ mytask['async_val'] = async_val
+ mytask['poll'] = poll
+
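+        # Illustrative (default CLI options): 'ansible web -m command -a uptime' builds
+        # {'name': 'Ansible Ad-Hoc', 'hosts': 'web', 'gather_facts': 'no',
+        #  'tasks': [{'action': {'module': 'command', 'args': {'_raw_params': 'uptime'}},
+        #             'timeout': 0, 'async_val': 0, 'poll': 15}]}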
+ return dict(
+ name="Ansible Ad-Hoc",
+ hosts=pattern,
+ gather_facts='no',
+ tasks=[mytask])
+
+ def run(self):
+ ''' create and execute the single task playbook '''
+
+ super(AdHocCLI, self).run()
+
+ # only thing left should be host pattern
+ pattern = to_text(context.CLIARGS['args'], errors='surrogate_or_strict')
+
+ # handle password prompts
+ sshpass = None
+ becomepass = None
+
+ (sshpass, becomepass) = self.ask_passwords()
+ passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
+
+ # get basic objects
+ loader, inventory, variable_manager = self._play_prereqs()
+
+ # get list of hosts to execute against
+ try:
+ hosts = self.get_host_list(inventory, context.CLIARGS['subset'], pattern)
+ except AnsibleError:
+ if context.CLIARGS['subset']:
+ raise
+ else:
+ hosts = []
+ display.warning("No hosts matched, nothing to do")
+
+ # just listing hosts?
+ if context.CLIARGS['listhosts']:
+ display.display(' hosts (%d):' % len(hosts))
+ for host in hosts:
+ display.display(' %s' % host)
+ return 0
+
+        # verify we have arguments if we know we need them
+ if context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS and not context.CLIARGS['module_args']:
+ err = "No argument passed to %s module" % context.CLIARGS['module_name']
+ if pattern.endswith(".yml"):
+ err = err + ' (did you mean to run ansible-playbook?)'
+ raise AnsibleOptionsError(err)
+
+ # Avoid modules that don't work with ad-hoc
+ if context.CLIARGS['module_name'] in C._ACTION_IMPORT_PLAYBOOK:
+ raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands"
+ % context.CLIARGS['module_name'])
+
+ # construct playbook objects to wrap task
+ play_ds = self._play_ds(pattern, context.CLIARGS['seconds'], context.CLIARGS['poll_interval'])
+ play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
+
+ # used in start callback
+ playbook = Playbook(loader)
+ playbook._entries.append(play)
+ playbook._file_name = '__adhoc_playbook__'
+
+ if self.callback:
+ cb = self.callback
+ elif context.CLIARGS['one_line']:
+ cb = 'oneline'
+        # only respect a custom 'stdout_callback' when 'bin_ansible_callbacks' is enabled
+ elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
+ cb = C.DEFAULT_STDOUT_CALLBACK
+ else:
+ cb = 'minimal'
+
+ run_tree = False
+ if context.CLIARGS['tree']:
+ C.CALLBACKS_ENABLED.append('tree')
+ C.TREE_DIR = context.CLIARGS['tree']
+ run_tree = True
+
+ # now create a task queue manager to execute the play
+ self._tqm = None
+ try:
+ self._tqm = TaskQueueManager(
+ inventory=inventory,
+ variable_manager=variable_manager,
+ loader=loader,
+ passwords=passwords,
+ stdout_callback=cb,
+ run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
+ run_tree=run_tree,
+ forks=context.CLIARGS['forks'],
+ )
+
+ self._tqm.load_callbacks()
+ self._tqm.send_callback('v2_playbook_on_start', playbook)
+
+ result = self._tqm.run(play)
+
+ self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
+ finally:
+ if self._tqm:
+ self._tqm.cleanup()
+ if loader:
+ loader.cleanup_all_tmp_files()
+
+ return result
+
+
+def main(args=None):
+ AdHocCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/cli/arguments/__init__.py b/lib/ansible/cli/arguments/__init__.py
new file mode 100644
index 0000000..7398e33
--- /dev/null
+++ b/lib/ansible/cli/arguments/__init__.py
@@ -0,0 +1,5 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/cli/arguments/option_helpers.py b/lib/ansible/cli/arguments/option_helpers.py
new file mode 100644
index 0000000..cb37d57
--- /dev/null
+++ b/lib/ansible/cli/arguments/option_helpers.py
@@ -0,0 +1,391 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import operator
+import argparse
+import os
+import os.path
+import sys
+import time
+
+from jinja2 import __version__ as j2_version
+
+import ansible
+from ansible import constants as C
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.yaml import HAS_LIBYAML, yaml_load
+from ansible.release import __version__
+from ansible.utils.path import unfrackpath
+
+
+#
+# Special purpose OptionParsers
+#
+class SortingHelpFormatter(argparse.HelpFormatter):
+ def add_arguments(self, actions):
+ actions = sorted(actions, key=operator.attrgetter('option_strings'))
+ super(SortingHelpFormatter, self).add_arguments(actions)
+
+
+class AnsibleVersion(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ ansible_version = to_native(version(getattr(parser, 'prog')))
+ print(ansible_version)
+ parser.exit()
+
+
+class UnrecognizedArgument(argparse.Action):
+ def __init__(self, option_strings, dest, const=True, default=None, required=False, help=None, metavar=None, nargs=0):
+ super(UnrecognizedArgument, self).__init__(option_strings=option_strings, dest=dest, nargs=nargs, const=const,
+ default=default, required=required, help=help)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ parser.error('unrecognized arguments: %s' % option_string)
+
+
+class PrependListAction(argparse.Action):
+ """A near clone of ``argparse._AppendAction``, but designed to prepend list values
+ instead of appending.
+ """
+ def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None,
+ choices=None, required=False, help=None, metavar=None):
+ if nargs == 0:
+ raise ValueError('nargs for append actions must be > 0; if arg '
+ 'strings are not supplying the value to append, '
+ 'the append const action may be more appropriate')
+ if const is not None and nargs != argparse.OPTIONAL:
+ raise ValueError('nargs must be %r to supply const' % argparse.OPTIONAL)
+ super(PrependListAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=nargs,
+ const=const,
+ default=default,
+ type=type,
+ choices=choices,
+ required=required,
+ help=help,
+ metavar=metavar
+ )
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = copy.copy(ensure_value(namespace, self.dest, []))
+ items[0:0] = values
+ setattr(namespace, self.dest, items)
+
+
+def ensure_value(namespace, name, value):
+ if getattr(namespace, name, None) is None:
+ setattr(namespace, name, value)
+ return getattr(namespace, name)
+
+
+#
+# Callbacks to validate and normalize Options
+#
+def unfrack_path(pathsep=False, follow=True):
+ """Turn an Option's data into a single path in Ansible locations"""
+ def inner(value):
+ if pathsep:
+ return [unfrackpath(x, follow=follow) for x in value.split(os.pathsep) if x]
+
+ if value == '-':
+ return value
+
+ return unfrackpath(value, follow=follow)
+ return inner
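+# Illustrative (hypothetical paths): unfrack_path()('~/inv') expands to an absolute
+# '/home/<user>/inv'; with pathsep=True, 'a:b' is split on os.pathsep and each part
+# expanded; a bare '-' is passed through unchanged so callers can treat it as stdin.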
+
+
+def maybe_unfrack_path(beacon):
+
+ def inner(value):
+ if value.startswith(beacon):
+ return beacon + unfrackpath(value[1:])
+ return value
+ return inner
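+# Illustrative: maybe_unfrack_path('@')('@vars.yml') returns '@' plus the absolute path
+# of 'vars.yml'; values that do not start with the beacon are returned untouched.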
+
+
+def _git_repo_info(repo_path):
+ """ returns a string containing git branch, commit id and commit date """
+ result = None
+ if os.path.exists(repo_path):
+ # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
+ if os.path.isfile(repo_path):
+ try:
+ with open(repo_path) as f:
+ gitdir = yaml_load(f).get('gitdir')
+                    # The .git file may contain an absolute path to the gitdir.
+ if os.path.isabs(gitdir):
+ repo_path = gitdir
+ else:
+ repo_path = os.path.join(repo_path[:-4], gitdir)
+ except (IOError, AttributeError):
+ return ''
+ with open(os.path.join(repo_path, "HEAD")) as f:
+ line = f.readline().rstrip("\n")
+ if line.startswith("ref:"):
+ branch_path = os.path.join(repo_path, line[5:])
+ else:
+ branch_path = None
+ if branch_path and os.path.exists(branch_path):
+ branch = '/'.join(line.split('/')[2:])
+ with open(branch_path) as f:
+ commit = f.readline()[:10]
+ else:
+ # detached HEAD
+ commit = line[:10]
+ branch = 'detached HEAD'
+ branch_path = os.path.join(repo_path, "HEAD")
+
+ date = time.localtime(os.stat(branch_path).st_mtime)
+ if time.daylight == 0:
+ offset = time.timezone
+ else:
+ offset = time.altzone
+ result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
+ else:
+ result = ''
+ return result
+
+
+def _gitinfo():
+ basedir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
+ repo_path = os.path.join(basedir, '.git')
+ return _git_repo_info(repo_path)
+
+
+def version(prog=None):
+ """ return ansible version """
+ if prog:
+ result = ["{0} [core {1}]".format(prog, __version__)]
+ else:
+ result = [__version__]
+
+ gitinfo = _gitinfo()
+ if gitinfo:
+ result[0] = "{0} {1}".format(result[0], gitinfo)
+ result.append(" config file = %s" % C.CONFIG_FILE)
+ if C.DEFAULT_MODULE_PATH is None:
+ cpath = "Default w/o overrides"
+ else:
+ cpath = C.DEFAULT_MODULE_PATH
+ result.append(" configured module search path = %s" % cpath)
+ result.append(" ansible python module location = %s" % ':'.join(ansible.__path__))
+ result.append(" ansible collection location = %s" % ':'.join(C.COLLECTIONS_PATHS))
+ result.append(" executable location = %s" % sys.argv[0])
+ result.append(" python version = %s (%s)" % (''.join(sys.version.splitlines()), to_native(sys.executable)))
+ result.append(" jinja version = %s" % j2_version)
+ result.append(" libyaml = %s" % HAS_LIBYAML)
+ return "\n".join(result)
+
+
+#
+# Functions to add pre-canned options to an OptionParser
+#
+
+def create_base_parser(prog, usage="", desc=None, epilog=None):
+ """
+ Create an options parser for all ansible scripts
+ """
+ # base opts
+ parser = argparse.ArgumentParser(
+ prog=prog,
+ formatter_class=SortingHelpFormatter,
+ epilog=epilog,
+ description=desc,
+ conflict_handler='resolve',
+ )
+ version_help = "show program's version number, config file location, configured module search path," \
+ " module location, executable location and exit"
+
+ parser.add_argument('--version', action=AnsibleVersion, nargs=0, help=version_help)
+ add_verbosity_options(parser)
+ return parser
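+# Minimal wiring sketch (hypothetical tool name), combining the helpers defined below:
+#   parser = create_base_parser('my-tool')
+#   add_inventory_options(parser)
+#   add_fork_options(parser)
+#   opts = parser.parse_args(['-i', 'hosts.ini', '-f', '10'])
+#   # opts.inventory == ['hosts.ini'], opts.forks == 10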
+
+
+def add_verbosity_options(parser):
+ """Add options for verbosity"""
+ parser.add_argument('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
+ help="Causes Ansible to print more debug messages. Adding multiple -v will increase the verbosity, "
+ "the builtin plugins currently evaluate up to -vvvvvv. A reasonable level to start is -vvv, "
+ "connection debugging might require -vvvv.")
+
+
+def add_async_options(parser):
+ """Add options for commands which can launch async tasks"""
+ parser.add_argument('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type=int, dest='poll_interval',
+ help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
+ parser.add_argument('-B', '--background', dest='seconds', type=int, default=0,
+ help='run asynchronously, failing after X seconds (default=N/A)')
+
+
+def add_basedir_options(parser):
+ """Add options for commands which can set a playbook basedir"""
+ parser.add_argument('--playbook-dir', default=C.PLAYBOOK_DIR, dest='basedir', action='store',
+ help="Since this tool does not use playbooks, use this as a substitute playbook directory. "
+ "This sets the relative path for many features including roles/ group_vars/ etc.",
+ type=unfrack_path())
+
+
+def add_check_options(parser):
+ """Add options for commands which can run with diagnostic information of tasks"""
+ parser.add_argument("-C", "--check", default=False, dest='check', action='store_true',
+ help="don't make any changes; instead, try to predict some of the changes that may occur")
+ parser.add_argument('--syntax-check', dest='syntax', action='store_true',
+ help="perform a syntax check on the playbook, but do not execute it")
+ parser.add_argument("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
+ help="when changing (small) files and templates, show the differences in those"
+ " files; works great with --check")
+
+
+def add_connect_options(parser):
+ """Add options for commands which need to connection to other hosts"""
+ connect_group = parser.add_argument_group("Connection Options", "control as whom and how to connect to hosts")
+
+ connect_group.add_argument('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
+ help='use this file to authenticate the connection', type=unfrack_path())
+ connect_group.add_argument('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
+ help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
+ connect_group.add_argument('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
+ help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
+ connect_group.add_argument('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type=int, dest='timeout',
+ help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
+
+ # ssh only
+ connect_group.add_argument('--ssh-common-args', default=None, dest='ssh_common_args',
+ help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
+ connect_group.add_argument('--sftp-extra-args', default=None, dest='sftp_extra_args',
+ help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
+ connect_group.add_argument('--scp-extra-args', default=None, dest='scp_extra_args',
+ help="specify extra arguments to pass to scp only (e.g. -l)")
+ connect_group.add_argument('--ssh-extra-args', default=None, dest='ssh_extra_args',
+ help="specify extra arguments to pass to ssh only (e.g. -R)")
+
+ parser.add_argument_group(connect_group)
+
+ connect_password_group = parser.add_mutually_exclusive_group()
+ connect_password_group.add_argument('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
+ help='ask for connection password')
+ connect_password_group.add_argument('--connection-password-file', '--conn-pass-file', default=C.CONNECTION_PASSWORD_FILE, dest='connection_password_file',
+ help="Connection password file", type=unfrack_path(), action='store')
+
+ parser.add_argument_group(connect_password_group)
+
+
+def add_fork_options(parser):
+ """Add options for commands that can fork worker processes"""
+ parser.add_argument('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type=int,
+ help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
+
+
+def add_inventory_options(parser):
+ """Add options for commands that utilize inventory"""
+ parser.add_argument('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
+ help="specify inventory host path or comma separated host list. --inventory-file is deprecated")
+ parser.add_argument('--list-hosts', dest='listhosts', action='store_true',
+ help='outputs a list of matching hosts; does not execute anything else')
+ parser.add_argument('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
+ help='further limit selected hosts to an additional pattern')
+
+
+def add_meta_options(parser):
+ """Add options for commands which can launch meta tasks from the command line"""
+ parser.add_argument('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
+ help="run handlers even if a task fails")
+ parser.add_argument('--flush-cache', dest='flush_cache', action='store_true',
+ help="clear the fact cache for every host in inventory")
+
+
+def add_module_options(parser):
+ """Add options for commands that load modules"""
+ module_path = C.config.get_configuration_definition('DEFAULT_MODULE_PATH').get('default', '')
+ parser.add_argument('-M', '--module-path', dest='module_path', default=None,
+ help="prepend colon-separated path(s) to module library (default=%s)" % module_path,
+ type=unfrack_path(pathsep=True), action=PrependListAction)
+
+
+def add_output_options(parser):
+ """Add options for commands which can change their output"""
+ parser.add_argument('-o', '--one-line', dest='one_line', action='store_true',
+ help='condense output')
+ parser.add_argument('-t', '--tree', dest='tree', default=None,
+ help='log output to this directory')
+
+
+def add_runas_options(parser):
+ """
+ Add options for commands which can run tasks as another user
+
+ Note that this includes the options from add_runas_prompt_options(). Only one of these
+ functions should be used.
+ """
+ runas_group = parser.add_argument_group("Privilege Escalation Options", "control how and which user you become as on target hosts")
+
+ # consolidated privilege escalation (become)
+ runas_group.add_argument("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
+ help="run operations with become (does not imply password prompting)")
+ runas_group.add_argument('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD,
+ help='privilege escalation method to use (default=%s)' % C.DEFAULT_BECOME_METHOD +
+ ', use `ansible-doc -t become -l` to list valid choices.')
+ runas_group.add_argument('--become-user', default=None, dest='become_user', type=str,
+ help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
+
+ parser.add_argument_group(runas_group)
+
+ add_runas_prompt_options(parser)
+
+
+def add_runas_prompt_options(parser, runas_group=None):
+ """
+ Add options for commands which need to prompt for privilege escalation credentials
+
+ Note that add_runas_options() includes these options already. Only one of the two functions
+ should be used.
+ """
+ if runas_group is not None:
+ parser.add_argument_group(runas_group)
+
+ runas_pass_group = parser.add_mutually_exclusive_group()
+
+ runas_pass_group.add_argument('-K', '--ask-become-pass', dest='become_ask_pass', action='store_true',
+ default=C.DEFAULT_BECOME_ASK_PASS,
+ help='ask for privilege escalation password')
+ runas_pass_group.add_argument('--become-password-file', '--become-pass-file', default=C.BECOME_PASSWORD_FILE, dest='become_password_file',
+ help="Become password file", type=unfrack_path(), action='store')
+
+ parser.add_argument_group(runas_pass_group)
+
+
+def add_runtask_options(parser):
+ """Add options for commands that run a task"""
+ parser.add_argument('-e', '--extra-vars', dest="extra_vars", action="append", type=maybe_unfrack_path('@'),
+ help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
+
+
+def add_tasknoplay_options(parser):
+ """Add options for commands that run a task w/o a defined play"""
+ parser.add_argument('--task-timeout', type=int, dest="task_timeout", action="store", default=C.TASK_TIMEOUT,
+ help="set task timeout limit in seconds, must be positive integer.")
+
+
+def add_subset_options(parser):
+ """Add options for commands which can run a subset of tasks"""
+ parser.add_argument('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append',
+ help="only run plays and tasks tagged with these values")
+ parser.add_argument('--skip-tags', dest='skip_tags', default=C.TAGS_SKIP, action='append',
+ help="only run plays and tasks whose tags do not match these values")
+
+
+def add_vault_options(parser):
+ """Add options for loading vault files"""
+ parser.add_argument('--vault-id', default=[], dest='vault_ids', action='append', type=str,
+ help='the vault identity to use')
+ base_group = parser.add_mutually_exclusive_group()
+ base_group.add_argument('--ask-vault-password', '--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
+ help='ask for vault password')
+ base_group.add_argument('--vault-password-file', '--vault-pass-file', default=[], dest='vault_password_files',
+ help="vault password file", type=unfrack_path(follow=False), action='append')
diff --git a/lib/ansible/cli/config.py b/lib/ansible/cli/config.py
new file mode 100755
index 0000000..3a5c242
--- /dev/null
+++ b/lib/ansible/cli/config.py
@@ -0,0 +1,551 @@
+#!/usr/bin/env python
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import os
+import yaml
+import shlex
+import subprocess
+
+from collections.abc import Mapping
+
+from ansible import context
+import ansible.plugins.loader as plugin_loader
+
+from ansible import constants as C
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.config.manager import ConfigManager, Setting
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils._text import to_native, to_text, to_bytes
+from ansible.module_utils.common.json import json_dump
+from ansible.module_utils.six import string_types
+from ansible.parsing.quoting import is_quoted
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.utils.color import stringc
+from ansible.utils.display import Display
+from ansible.utils.path import unfrackpath
+
+display = Display()
+
+
+def yaml_dump(data, default_flow_style=False, default_style=None):
+ return yaml.dump(data, Dumper=AnsibleDumper, default_flow_style=default_flow_style, default_style=default_style)
+
+
+def yaml_short(data):
+ return yaml_dump(data, default_flow_style=True, default_style="''")
+
+
+def get_constants():
+ ''' helper method to ensure we can template based on existing constants '''
+ if not hasattr(get_constants, 'cvars'):
+ get_constants.cvars = {k: getattr(C, k) for k in dir(C) if not k.startswith('__')}
+ return get_constants.cvars
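+# Note: the constants mapping is computed once and memoized on the function object,
+# so repeated template lookups reuse the same dict instead of re-scanning dir(C).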
+
+
+class ConfigCLI(CLI):
+ """ Config command line class """
+
+ name = 'ansible-config'
+
+ def __init__(self, args, callback=None):
+
+ self.config_file = None
+ self.config = None
+ super(ConfigCLI, self).__init__(args, callback)
+
+ def init_parser(self):
+
+ super(ConfigCLI, self).init_parser(
+ desc="View ansible configuration.",
+ )
+
+ common = opt_help.argparse.ArgumentParser(add_help=False)
+ opt_help.add_verbosity_options(common)
+ common.add_argument('-c', '--config', dest='config_file',
+ help="path to configuration file, defaults to first file found in precedence.")
+ common.add_argument("-t", "--type", action="store", default='base', dest='type', choices=['all', 'base'] + list(C.CONFIGURABLE_PLUGINS),
+ help="Filter down to a specific plugin type.")
+ common.add_argument('args', help='Specific plugin to target, requires type of plugin to be set', nargs='*')
+
+ subparsers = self.parser.add_subparsers(dest='action')
+ subparsers.required = True
+
+ list_parser = subparsers.add_parser('list', help='Print all config options', parents=[common])
+ list_parser.set_defaults(func=self.execute_list)
+ list_parser.add_argument('--format', '-f', dest='format', action='store', choices=['json', 'yaml'], default='yaml',
+ help='Output format for list')
+
+ dump_parser = subparsers.add_parser('dump', help='Dump configuration', parents=[common])
+ dump_parser.set_defaults(func=self.execute_dump)
+ dump_parser.add_argument('--only-changed', '--changed-only', dest='only_changed', action='store_true',
+ help="Only show configurations that have changed from the default")
+ dump_parser.add_argument('--format', '-f', dest='format', action='store', choices=['json', 'yaml', 'display'], default='display',
+ help='Output format for dump')
+
+ view_parser = subparsers.add_parser('view', help='View configuration file', parents=[common])
+ view_parser.set_defaults(func=self.execute_view)
+
+ init_parser = subparsers.add_parser('init', help='Create initial configuration', parents=[common])
+ init_parser.set_defaults(func=self.execute_init)
+ init_parser.add_argument('--format', '-f', dest='format', action='store', choices=['ini', 'env', 'vars'], default='ini',
+ help='Output format for init')
+ init_parser.add_argument('--disabled', dest='commented', action='store_true', default=False,
+ help='Prefixes all entries with a comment character to disable them')
+
+ # search_parser = subparsers.add_parser('find', help='Search configuration')
+ # search_parser.set_defaults(func=self.execute_search)
+ # search_parser.add_argument('args', help='Search term', metavar='<search term>')
+
+ def post_process_args(self, options):
+ options = super(ConfigCLI, self).post_process_args(options)
+ display.verbosity = options.verbosity
+
+ return options
+
+ def run(self):
+
+ super(ConfigCLI, self).run()
+
+ if context.CLIARGS['config_file']:
+ self.config_file = unfrackpath(context.CLIARGS['config_file'], follow=False)
+ b_config = to_bytes(self.config_file)
+ if os.path.exists(b_config) and os.access(b_config, os.R_OK):
+ self.config = ConfigManager(self.config_file)
+ else:
+ raise AnsibleOptionsError('The provided configuration file is missing or not accessible: %s' % to_native(self.config_file))
+ else:
+ self.config = C.config
+ self.config_file = self.config._config_file
+
+ if self.config_file:
+ try:
+ if not os.path.exists(self.config_file):
+ raise AnsibleOptionsError("%s does not exist or is not accessible" % (self.config_file))
+ elif not os.path.isfile(self.config_file):
+ raise AnsibleOptionsError("%s is not a valid file" % (self.config_file))
+
+ os.environ['ANSIBLE_CONFIG'] = to_native(self.config_file)
+ except Exception:
+ if context.CLIARGS['action'] in ['view']:
+ raise
+ elif context.CLIARGS['action'] in ['edit', 'update']:
+ display.warning("File does not exist, used empty file: %s" % self.config_file)
+
+ elif context.CLIARGS['action'] == 'view':
+ raise AnsibleError('Invalid or no config file was supplied')
+
+ # run the requested action
+ context.CLIARGS['func']()
+
+ def execute_update(self):
+ '''
+ Updates a single setting in the specified ansible.cfg
+ '''
+ raise AnsibleError("Option not implemented yet")
+
+ # pylint: disable=unreachable
+ if context.CLIARGS['setting'] is None:
+ raise AnsibleOptionsError("update option requires a setting to update")
+
+ (entry, value) = context.CLIARGS['setting'].split('=')
+ if '.' in entry:
+ (section, option) = entry.split('.')
+ else:
+ section = 'defaults'
+ option = entry
+ subprocess.call([
+ 'ansible',
+ '-m', 'ini_file',
+ 'localhost',
+ '-c', 'local',
+ '-a', '"dest=%s section=%s option=%s value=%s backup=yes"' % (self.config_file, section, option, value)
+ ])
+
+ def execute_view(self):
+ '''
+ Displays the current config file
+ '''
+ try:
+ with open(self.config_file, 'rb') as f:
+ self.pager(to_text(f.read(), errors='surrogate_or_strict'))
+ except Exception as e:
+ raise AnsibleError("Failed to open config file: %s" % to_native(e))
+
+ def execute_edit(self):
+ '''
+ Opens ansible.cfg in the default EDITOR
+ '''
+ raise AnsibleError("Option not implemented yet")
+
+ # pylint: disable=unreachable
+ try:
+ editor = shlex.split(os.environ.get('EDITOR', 'vi'))
+ editor.append(self.config_file)
+ subprocess.call(editor)
+ except Exception as e:
+ raise AnsibleError("Failed to open editor: %s" % to_native(e))
+
+ def _list_plugin_settings(self, ptype, plugins=None):
+ entries = {}
+ loader = getattr(plugin_loader, '%s_loader' % ptype)
+
+ # build list
+ if plugins:
+ plugin_cs = []
+ for plugin in plugins:
+ p = loader.get(plugin, class_only=True)
+ if p is None:
+ display.warning("Skipping %s as we could not find matching plugin" % plugin)
+ else:
+ plugin_cs.append(p)
+ else:
+ plugin_cs = loader.all(class_only=True)
+
+ # iterate over class instances
+ for plugin in plugin_cs:
+ finalname = name = plugin._load_name
+ if name.startswith('_'):
+ # alias or deprecated
+ if os.path.islink(plugin._original_path):
+ continue
+ else:
+ finalname = name.replace('_', '', 1) + ' (DEPRECATED)'
+
+ entries[finalname] = self.config.get_configuration_definitions(ptype, name)
+
+ return entries
+
+ def _list_entries_from_args(self):
+ '''
+ build a dict with the list requested configs
+ '''
+ config_entries = {}
+ if context.CLIARGS['type'] in ('base', 'all'):
+ # this dumps main/common configs
+ config_entries = self.config.get_configuration_definitions(ignore_private=True)
+
+ if context.CLIARGS['type'] != 'base':
+ config_entries['PLUGINS'] = {}
+
+ if context.CLIARGS['type'] == 'all':
+ # now each plugin type
+ for ptype in C.CONFIGURABLE_PLUGINS:
+ config_entries['PLUGINS'][ptype.upper()] = self._list_plugin_settings(ptype)
+ elif context.CLIARGS['type'] != 'base':
+ config_entries['PLUGINS'][context.CLIARGS['type']] = self._list_plugin_settings(context.CLIARGS['type'], context.CLIARGS['args'])
+
+ return config_entries
+
+ def execute_list(self):
+ '''
+ list and output available configs
+ '''
+
+ config_entries = self._list_entries_from_args()
+ if context.CLIARGS['format'] == 'yaml':
+ output = yaml_dump(config_entries)
+ elif context.CLIARGS['format'] == 'json':
+ output = json_dump(config_entries)
+
+ self.pager(to_text(output, errors='surrogate_or_strict'))
+
+ def _get_settings_vars(self, settings, subkey):
+
+ data = []
+ if context.CLIARGS['commented']:
+ prefix = '#'
+ else:
+ prefix = ''
+
+ for setting in settings:
+
+ if not settings[setting].get('description'):
+ continue
+
+ default = settings[setting].get('default', '')
+ if subkey == 'env':
+ stype = settings[setting].get('type', '')
+ if stype == 'boolean':
+ if default:
+ default = '1'
+ else:
+ default = '0'
+ elif default:
+ if stype == 'list':
+ if not isinstance(default, string_types):
+ # python lists are not valid env ones
+ try:
+ default = ', '.join(default)
+ except Exception as e:
+ # list of other stuff
+ default = '%s' % to_native(default)
+ if isinstance(default, string_types) and not is_quoted(default):
+ default = shlex.quote(default)
+ elif default is None:
+ default = ''
+
+ if subkey in settings[setting] and settings[setting][subkey]:
+ entry = settings[setting][subkey][-1]['name']
+ if isinstance(settings[setting]['description'], string_types):
+ desc = settings[setting]['description']
+ else:
+ desc = '\n#'.join(settings[setting]['description'])
+ name = settings[setting].get('name', setting)
+ data.append('# %s(%s): %s' % (name, settings[setting].get('type', 'string'), desc))
+
+ # TODO: might need quoting and value coercion depending on type
+ if subkey == 'env':
+ if entry.startswith('_ANSIBLE_'):
+ continue
+ data.append('%s%s=%s' % (prefix, entry, default))
+ elif subkey == 'vars':
+ if entry.startswith('_ansible_'):
+ continue
+ data.append(prefix + '%s: %s' % (entry, to_text(yaml_short(default), errors='surrogate_or_strict')))
+ data.append('')
+
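+        # Illustrative 'env' output for one setting (description elided, default shown):
+        #   # DEFAULT_FORKS(integer): Maximum number of forks ...
+        #   ANSIBLE_FORKS=5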
+ return data
+
+ def _get_settings_ini(self, settings):
+
+ sections = {}
+ for o in sorted(settings.keys()):
+
+ opt = settings[o]
+
+ if not isinstance(opt, Mapping):
+                # recursed into one of the few settings that is a mapping, now hitting its strings
+ continue
+
+ if not opt.get('description'):
+                # it's a plugin
+ new_sections = self._get_settings_ini(opt)
+ for s in new_sections:
+ if s in sections:
+ sections[s].extend(new_sections[s])
+ else:
+ sections[s] = new_sections[s]
+ continue
+
+ if isinstance(opt['description'], string_types):
+ desc = '# (%s) %s' % (opt.get('type', 'string'), opt['description'])
+ else:
+ desc = "# (%s) " % opt.get('type', 'string')
+ desc += "\n# ".join(opt['description'])
+
+ if 'ini' in opt and opt['ini']:
+ entry = opt['ini'][-1]
+ if entry['section'] not in sections:
+ sections[entry['section']] = []
+
+ default = opt.get('default', '')
+ if opt.get('type', '') == 'list' and not isinstance(default, string_types):
+ # python lists are not valid ini ones
+ default = ', '.join(default)
+ elif default is None:
+ default = ''
+
+ if context.CLIARGS['commented']:
+ entry['key'] = ';%s' % entry['key']
+
+ key = desc + '\n%s=%s' % (entry['key'], default)
+ sections[entry['section']].append(key)
+
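+        # Illustrative return shape (one setting, description elided):
+        #   {'defaults': ['# (integer) ...\nforks=5']}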
+ return sections
+
+ def execute_init(self):
+
+ data = []
+ config_entries = self._list_entries_from_args()
+ plugin_types = config_entries.pop('PLUGINS', None)
+
+ if context.CLIARGS['format'] == 'ini':
+ sections = self._get_settings_ini(config_entries)
+
+ if plugin_types:
+ for ptype in plugin_types:
+ plugin_sections = self._get_settings_ini(plugin_types[ptype])
+ for s in plugin_sections:
+ if s in sections:
+ sections[s].extend(plugin_sections[s])
+ else:
+ sections[s] = plugin_sections[s]
+
+ if sections:
+ for section in sections.keys():
+ data.append('[%s]' % section)
+ for key in sections[section]:
+ data.append(key)
+ data.append('')
+ data.append('')
+
+ elif context.CLIARGS['format'] in ('env', 'vars'): # TODO: add yaml once that config option is added
+ data = self._get_settings_vars(config_entries, context.CLIARGS['format'])
+ if plugin_types:
+ for ptype in plugin_types:
+ for plugin in plugin_types[ptype].keys():
+ data.extend(self._get_settings_vars(plugin_types[ptype][plugin], context.CLIARGS['format']))
+
+ self.pager(to_text('\n'.join(data), errors='surrogate_or_strict'))
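+    # Illustrative usage: 'ansible-config init --disabled > ansible.cfg' writes a fully
+    # commented-out ini template; '--format env' and '--format vars' emit the matching
+    # environment-variable and vars-file skeletons instead.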
+
+ def _render_settings(self, config):
+
+ entries = []
+ for setting in sorted(config):
+ changed = (config[setting].origin not in ('default', 'REQUIRED'))
+
+ if context.CLIARGS['format'] == 'display':
+ if isinstance(config[setting], Setting):
+ # proceed normally
+ if config[setting].origin == 'default':
+ color = 'green'
+ elif config[setting].origin == 'REQUIRED':
+ # should include '_terms', '_input', etc
+ color = 'red'
+ else:
+ color = 'yellow'
+ msg = "%s(%s) = %s" % (setting, config[setting].origin, config[setting].value)
+ else:
+ color = 'green'
+ msg = "%s(%s) = %s" % (setting, 'default', config[setting].get('default'))
+
+ entry = stringc(msg, color)
+ else:
+ entry = {}
+ for key in config[setting]._fields:
+ entry[key] = getattr(config[setting], key)
+
+ if not context.CLIARGS['only_changed'] or changed:
+ entries.append(entry)
+
+ return entries
+
+ def _get_global_configs(self):
+ config = self.config.get_configuration_definitions(ignore_private=True).copy()
+ for setting in config.keys():
+ v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, variables=get_constants())
+ config[setting] = Setting(setting, v, o, None)
+
+ return self._render_settings(config)
+
+ def _get_plugin_configs(self, ptype, plugins):
+
+ # prep loading
+ loader = getattr(plugin_loader, '%s_loader' % ptype)
+
+        # accumulators
+ output = []
+ config_entries = {}
+
+ # build list
+ if plugins:
+ plugin_cs = []
+ for plugin in plugins:
+ p = loader.get(plugin, class_only=True)
+ if p is None:
+ display.warning("Skipping %s as we could not find matching plugin" % plugin)
+ else:
+                    plugin_cs.append(p)
+ else:
+ plugin_cs = loader.all(class_only=True)
+
+ for plugin in plugin_cs:
+            # in case of deprecation they diverge
+ finalname = name = plugin._load_name
+ if name.startswith('_'):
+ if os.path.islink(plugin._original_path):
+ # skip alias
+ continue
+ # deprecated, but use 'nice name'
+ finalname = name.replace('_', '', 1) + ' (DEPRECATED)'
+
+ # default entries per plugin
+ config_entries[finalname] = self.config.get_configuration_definitions(ptype, name)
+
+ try:
+ # populate config entries by loading plugin
+ dump = loader.get(name, class_only=True)
+ except Exception as e:
+ display.warning('Skipping "%s" %s plugin, as we cannot load plugin to check config due to : %s' % (name, ptype, to_native(e)))
+ continue
+
+ # actually get the values
+ for setting in config_entries[finalname].keys():
+ try:
+ v, o = C.config.get_config_value_and_origin(setting, cfile=self.config_file, plugin_type=ptype, plugin_name=name, variables=get_constants())
+ except AnsibleError as e:
+ if to_text(e).startswith('No setting was provided for required configuration'):
+ v = None
+ o = 'REQUIRED'
+ else:
+ raise e
+
+ if v is None and o is None:
+                    # not all cases will be an error
+ o = 'REQUIRED'
+
+ config_entries[finalname][setting] = Setting(setting, v, o, None)
+
+ # pretty please!
+ results = self._render_settings(config_entries[finalname])
+ if results:
+ if context.CLIARGS['format'] == 'display':
+ # avoid header for empty lists (only changed!)
+ output.append('\n%s:\n%s' % (finalname, '_' * len(finalname)))
+ output.extend(results)
+ else:
+ output.append({finalname: results})
+
+ return output
+
+ def execute_dump(self):
+ '''
+ Shows the current settings, merges ansible.cfg if specified
+ '''
+ if context.CLIARGS['type'] == 'base':
+ # deal with base
+ output = self._get_global_configs()
+ elif context.CLIARGS['type'] == 'all':
+ # deal with base
+ output = self._get_global_configs()
+ # deal with plugins
+ for ptype in C.CONFIGURABLE_PLUGINS:
+ plugin_list = self._get_plugin_configs(ptype, context.CLIARGS['args'])
+ if context.CLIARGS['format'] == 'display':
+ if not context.CLIARGS['only_changed'] or plugin_list:
+ output.append('\n%s:\n%s' % (ptype.upper(), '=' * len(ptype)))
+ output.extend(plugin_list)
+ else:
+ if ptype in ('modules', 'doc_fragments'):
+ pname = ptype.upper()
+ else:
+ pname = '%s_PLUGINS' % ptype.upper()
+ output.append({pname: plugin_list})
+ else:
+ # deal with plugins
+ output = self._get_plugin_configs(context.CLIARGS['type'], context.CLIARGS['args'])
+
+ if context.CLIARGS['format'] == 'display':
+ text = '\n'.join(output)
+        elif context.CLIARGS['format'] == 'yaml':
+ text = yaml_dump(output)
+ elif context.CLIARGS['format'] == 'json':
+ text = json_dump(output)
+
+ self.pager(to_text(text, errors='surrogate_or_strict'))
+
+
+def main(args=None):
+ ConfigCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/cli/console.py b/lib/ansible/cli/console.py
new file mode 100755
index 0000000..3125cc4
--- /dev/null
+++ b/lib/ansible/cli/console.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+# Copyright: (c) 2014, Nandor Sivok <dominis@haxor.hu>
+# Copyright: (c) 2016, Redhat Inc
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import atexit
+import cmd
+import getpass
+import readline
+import os
+import sys
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.parsing.splitter import parse_kv
+from ansible.playbook.play import Play
+from ansible.plugins.list import list_plugins
+from ansible.plugins.loader import module_loader, fragment_loader
+from ansible.utils import plugin_docs
+from ansible.utils.color import stringc
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ConsoleCLI(CLI, cmd.Cmd):
+ '''
+ A REPL that allows for running ad-hoc tasks against a chosen inventory
+ from a nice shell with built-in tab completion (based on dominis'
+ ansible-shell).
+
+ It supports several commands, and you can modify its configuration at
+ runtime:
+
+ - `cd [pattern]`: change host/group (you can use host patterns eg.: app*.dc*:!app01*)
+ - `list`: list available hosts in the current path
+ - `list groups`: list groups included in the current path
+ - `become`: toggle the become flag
+ - `!`: forces shell module instead of the ansible module (!yum update -y)
+ - `verbosity [num]`: set the verbosity level
+ - `forks [num]`: set the number of forks
+ - `become_user [user]`: set the become_user
+ - `remote_user [user]`: set the remote_user
+ - `become_method [method]`: set the privilege escalation method
+ - `check [bool]`: toggle check mode
+ - `diff [bool]`: toggle diff mode
+ - `timeout [integer]`: set the timeout of tasks in seconds (0 to disable)
+ - `help [command/module]`: display documentation for the command or module
+ - `exit`: exit ansible-console
+ '''
+
+ name = 'ansible-console'
+ modules = [] # type: list[str] | None
+ ARGUMENTS = {'host-pattern': 'A name of a group in the inventory, a shell-like glob '
+ 'selecting hosts in inventory or any combination of the two separated by commas.'}
+
+ # use specific to console, but fallback to highlight for backwards compatibility
+ NORMAL_PROMPT = C.COLOR_CONSOLE_PROMPT or C.COLOR_HIGHLIGHT
+
+ def __init__(self, args):
+
+ super(ConsoleCLI, self).__init__(args)
+
+ self.intro = 'Welcome to the ansible console. Type help or ? to list commands.\n'
+
+ self.groups = []
+ self.hosts = []
+ self.pattern = None
+ self.variable_manager = None
+ self.loader = None
+ self.passwords = dict()
+
+ self.cwd = '*'
+
+ # Defaults for these are set from the CLI in run()
+ self.remote_user = None
+ self.become = None
+ self.become_user = None
+ self.become_method = None
+ self.check_mode = None
+ self.diff = None
+ self.forks = None
+ self.task_timeout = None
+ self.collections = None
+
+ cmd.Cmd.__init__(self)
+
+ def init_parser(self):
+ super(ConsoleCLI, self).init_parser(
+ desc="REPL console for executing Ansible tasks.",
+ epilog="This is not a live session/connection: each task is executed in the background and returns its results."
+ )
+ opt_help.add_runas_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_check_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_fork_options(self.parser)
+ opt_help.add_module_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_tasknoplay_options(self.parser)
+
+ # options unique to shell
+ self.parser.add_argument('pattern', help='host pattern', metavar='pattern', default='all', nargs='?')
+ self.parser.add_argument('--step', dest='step', action='store_true',
+ help="one-step-at-a-time: confirm each task before running")
+
+ def post_process_args(self, options):
+ options = super(ConsoleCLI, self).post_process_args(options)
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options, runas_opts=True, fork_opts=True)
+ return options
+
+ def get_names(self):
+ return dir(self)
+
+ def cmdloop(self):
+ try:
+ cmd.Cmd.cmdloop(self)
+
+ except KeyboardInterrupt:
+ self.cmdloop()
+
+ except EOFError:
+ self.display("[Ansible-console was exited]")
+ self.do_exit(self)
+
+ def set_prompt(self):
+ login_user = self.remote_user or getpass.getuser()
+ self.selected = self.inventory.list_hosts(self.cwd)
+ prompt = "%s@%s (%d)[f:%s]" % (login_user, self.cwd, len(self.selected), self.forks)
+ if self.become and self.become_user in [None, 'root']:
+ prompt += "# "
+ color = C.COLOR_ERROR
+ else:
+ prompt += "$ "
+ color = self.NORMAL_PROMPT
+ self.prompt = stringc(prompt, color, wrap_nonvisible_chars=True)
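+        # Illustrative: remote_user 'admin', cwd 'web', 4 selected hosts and 5 forks
+        # render as 'admin@web (4)[f:5]$ '; the '#' variant in red marks become-to-root.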
+
+ def list_modules(self):
+ return list_plugins('module', self.collections)
+
+ def default(self, line, forceshell=False):
+ """ actually runs modules """
+ if line.startswith("#"):
+ return False
+
+ if not self.cwd:
+ display.error("No host found")
+ return False
+
+ # defaults
+ module = 'shell'
+ module_args = line
+
+ if forceshell is not True:
+ possible_module, *possible_args = line.split()
+ if module_loader.find_plugin(possible_module):
+ # we found module!
+ module = possible_module
+ if possible_args:
+ module_args = ' '.join(possible_args)
+ else:
+ module_args = ''
+
+ if self.callback:
+ cb = self.callback
+ elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
+ cb = C.DEFAULT_STDOUT_CALLBACK
+ else:
+ cb = 'minimal'
+
+ result = None
+ try:
+ check_raw = module in C._ACTION_ALLOWS_RAW_ARGS
+ task = dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)), timeout=self.task_timeout)
+ play_ds = dict(
+ name="Ansible Shell",
+ hosts=self.cwd,
+ gather_facts='no',
+ tasks=[task],
+ remote_user=self.remote_user,
+ become=self.become,
+ become_user=self.become_user,
+ become_method=self.become_method,
+ check_mode=self.check_mode,
+ diff=self.diff,
+ collections=self.collections,
+ )
+ play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader)
+ except Exception as e:
+ display.error(u"Unable to build command: %s" % to_text(e))
+ return False
+
+ try:
+ # now create a task queue manager to execute the play
+ self._tqm = None
+ try:
+ self._tqm = TaskQueueManager(
+ inventory=self.inventory,
+ variable_manager=self.variable_manager,
+ loader=self.loader,
+ passwords=self.passwords,
+ stdout_callback=cb,
+ run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
+ run_tree=False,
+ forks=self.forks,
+ )
+
+ result = self._tqm.run(play)
+ display.debug(result)
+ finally:
+ if self._tqm:
+ self._tqm.cleanup()
+ if self.loader:
+ self.loader.cleanup_all_tmp_files()
+
+ if result is None:
+ display.error("No hosts found")
+ return False
+ except KeyboardInterrupt:
+ display.error('User interrupted execution')
+ return False
+ except Exception as e:
+ if self.verbosity >= 3:
+ import traceback
+ display.v(traceback.format_exc())
+ display.error(to_text(e))
+ return False
+
+ def emptyline(self):
+ return
+
+ def do_shell(self, arg):
+ """
+ You can run shell commands through the shell module.
+
+        e.g.:
+ shell ps uax | grep java | wc -l
+ shell killall python
+ shell halt -n
+
+        You can use ! to force the shell module, e.g.:
+ !ps aux | grep java | wc -l
+ """
+ self.default(arg, True)
+
+ def help_shell(self):
+ display.display("You can run shell commands through the shell module.")
+
+ def do_forks(self, arg):
+ """Set the number of forks"""
+ if arg:
+            try:
+                forks = int(arg)
+            except (TypeError, ValueError):
+                display.error('Invalid argument for "forks"')
+                self.usage_forks()
+                return
+
+ if forks > 0:
+ self.forks = forks
+ self.set_prompt()
+
+ else:
+ display.display('forks must be greater than or equal to 1')
+ else:
+ self.usage_forks()
+
+ def help_forks(self):
+ display.display("Set the number of forks to use per task")
+ self.usage_forks()
+
+ def usage_forks(self):
+ display.display('Usage: forks <number>')
+
+ do_serial = do_forks
+ help_serial = help_forks
+
+ def do_collections(self, arg):
+ """Set list of collections for 'short name' usage"""
+ if arg in ('', 'none'):
+ self.collections = None
+ elif not arg:
+ self.usage_collections()
+ else:
+ collections = arg.split(',')
+ for collection in collections:
+ if self.collections is None:
+ self.collections = []
+ self.collections.append(collection.strip())
+
+ if self.collections:
+ display.v('Collections name search is set to: %s' % ', '.join(self.collections))
+ else:
+ display.v('Collections name search is using defaults')
+
+ def help_collections(self):
+ display.display("Set the collection name search path when using short names for plugins")
+ self.usage_collections()
+
+ def usage_collections(self):
+ display.display('Usage: collections <collection1>[, <collection2> ...]\n Use empty quotes or "none" to reset to default.\n')
+
+ def do_verbosity(self, arg):
+ """Set verbosity level"""
+ if not arg:
+ display.display('Usage: verbosity <number>')
+ else:
+ try:
+ display.verbosity = int(arg)
+ display.v('verbosity level set to %s' % arg)
+ except (TypeError, ValueError) as e:
+ display.error('The verbosity must be a valid integer: %s' % to_text(e))
+
+ def help_verbosity(self):
+ display.display("Set the verbosity level, equivalent to -v for 1 and -vvvv for 4.")
+
+ def do_cd(self, arg):
+ """
+        Change active host/group. You can also use host patterns, e.g.:
+ cd webservers
+ cd webservers:dbservers
+ cd webservers:!phoenix
+ cd webservers:&staging
+ cd webservers:dbservers:&staging:!phoenix
+ """
+ if not arg:
+ self.cwd = '*'
+ elif arg in '/*':
+ self.cwd = 'all'
+ elif self.inventory.get_hosts(arg):
+ self.cwd = arg
+ else:
+ display.display("no host matched")
+
+ self.set_prompt()
+
+ def help_cd(self):
+ display.display("Change active host/group. ")
+ self.usage_cd()
+
+ def usage_cd(self):
+ display.display("Usage: cd <group>|<host>|<host pattern>")
+
+ def do_list(self, arg):
+ """List the hosts in the current group"""
+ if not arg:
+ for host in self.selected:
+ display.display(host.name)
+ elif arg == 'groups':
+ for group in self.groups:
+ display.display(group)
+ else:
+ display.error('Invalid option passed to "list"')
+ self.help_list()
+
+ def help_list(self):
+ display.display("List the hosts in the current group or a list of groups if you add 'groups'.")
+
+ def do_become(self, arg):
+ """Toggle whether plays run with become"""
+ if arg:
+ self.become = boolean(arg, strict=False)
+ display.v("become changed to %s" % self.become)
+ self.set_prompt()
+ else:
+ display.display("Please specify become value, e.g. `become yes`")
+
+ def help_become(self):
+ display.display("Toggle whether the tasks are run with become")
+
+ def do_remote_user(self, arg):
+ """Given a username, set the remote user plays are run by"""
+ if arg:
+ self.remote_user = arg
+ self.set_prompt()
+ else:
+ display.display("Please specify a remote user, e.g. `remote_user root`")
+
+ def help_remote_user(self):
+ display.display("Set the user for use as login to the remote target")
+
+ def do_become_user(self, arg):
+ """Given a username, set the user that plays are run by when using become"""
+ if arg:
+ self.become_user = arg
+ else:
+ display.display("Please specify a user, e.g. `become_user jenkins`")
+ display.v("Current user is %s" % self.become_user)
+ self.set_prompt()
+
+ def help_become_user(self):
+ display.display("Set the user for use with privilege escalation (which remote user attempts to 'become' when become is enabled)")
+
+ def do_become_method(self, arg):
+ """Given a become_method, set the privilege escalation method when using become"""
+ if arg:
+ self.become_method = arg
+ display.v("become_method changed to %s" % self.become_method)
+ else:
+ display.display("Please specify a become_method, e.g. `become_method su`")
+ display.v("Current become_method is %s" % self.become_method)
+
+ def help_become_method(self):
+ display.display("Set the privilege escalation plugin to use when become is enabled")
+
+ def do_check(self, arg):
+ """Toggle whether plays run with check mode"""
+ if arg:
+ self.check_mode = boolean(arg, strict=False)
+ display.display("check mode changed to %s" % self.check_mode)
+ else:
+ display.display("Please specify check mode value, e.g. `check yes`")
+ display.v("check mode is currently %s." % self.check_mode)
+
+ def help_check(self):
+ display.display("Toggle check_mode for the tasks")
+
+ def do_diff(self, arg):
+ """Toggle whether plays run with diff"""
+ if arg:
+ self.diff = boolean(arg, strict=False)
+ display.display("diff mode changed to %s" % self.diff)
+ else:
+ display.display("Please specify a diff value , e.g. `diff yes`")
+ display.v("diff mode is currently %s" % self.diff)
+
+ def help_diff(self):
+ display.display("Toggle diff output for the tasks")
+
+ def do_timeout(self, arg):
+ """Set the timeout"""
+ if arg:
+ try:
+ timeout = int(arg)
+ if timeout < 0:
+                    display.error('The timeout must be a positive integer, or 0 to disable')
+ else:
+ self.task_timeout = timeout
+ except (TypeError, ValueError) as e:
+ display.error('The timeout must be a valid positive integer, or 0 to disable: %s' % to_text(e))
+ else:
+ self.usage_timeout()
+
+ def help_timeout(self):
+ display.display("Set task timeout in seconds")
+ self.usage_timeout()
+
+ def usage_timeout(self):
+ display.display('Usage: timeout <seconds>')
+
+ def do_exit(self, args):
+ """Exits from the console"""
+ sys.stdout.write('\nAnsible-console was exited.\n')
+ return -1
+
+ def help_exit(self):
+ display.display("LEAVE!")
+
+ do_EOF = do_exit
+ help_EOF = help_exit
+
+ def helpdefault(self, module_name):
+ if module_name:
+ in_path = module_loader.find_plugin(module_name)
+ if in_path:
+ oc, a, _dummy1, _dummy2 = plugin_docs.get_docstring(in_path, fragment_loader)
+ if oc:
+ display.display(oc['short_description'])
+ display.display('Parameters:')
+ for opt in oc['options'].keys():
+ display.display(' ' + stringc(opt, self.NORMAL_PROMPT) + ' ' + oc['options'][opt]['description'][0])
+ else:
+ display.error('No documentation found for %s.' % module_name)
+ else:
+ display.error('%s is not a valid command, use ? to list all valid commands.' % module_name)
+
+ def help_help(self):
+ display.warning("Don't be redundant!")
+
+ def complete_cd(self, text, line, begidx, endidx):
+ mline = line.partition(' ')[2]
+ offs = len(mline) - len(text)
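+        # readline completes only the trailing token, so strip the part of each
+        # match that the user has already typed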
+
+ if self.cwd in ('all', '*', '\\'):
+ completions = self.hosts + self.groups
+ else:
+ completions = [x.name for x in self.inventory.list_hosts(self.cwd)]
+
+ return [to_native(s)[offs:] for s in completions if to_native(s).startswith(to_native(mline))]
+
+ def completedefault(self, text, line, begidx, endidx):
+ if line.split()[0] in self.list_modules():
+ mline = line.split(' ')[-1]
+ offs = len(mline) - len(text)
+ completions = self.module_args(line.split()[0])
+
+ return [s[offs:] + '=' for s in completions if s.startswith(mline)]
+
+ def module_args(self, module_name):
+ in_path = module_loader.find_plugin(module_name)
+ oc, a, _dummy1, _dummy2 = plugin_docs.get_docstring(in_path, fragment_loader, is_module=True)
+ return list(oc['options'].keys())
+
+ def run(self):
+
+ super(ConsoleCLI, self).run()
+
+ sshpass = None
+ becomepass = None
+
+ # hosts
+ self.pattern = context.CLIARGS['pattern']
+ self.cwd = self.pattern
+
+ # Defaults from the command line
+ self.remote_user = context.CLIARGS['remote_user']
+ self.become = context.CLIARGS['become']
+ self.become_user = context.CLIARGS['become_user']
+ self.become_method = context.CLIARGS['become_method']
+ self.check_mode = context.CLIARGS['check']
+ self.diff = context.CLIARGS['diff']
+ self.forks = context.CLIARGS['forks']
+ self.task_timeout = context.CLIARGS['task_timeout']
+
+ # set module path if needed
+ if context.CLIARGS['module_path']:
+ for path in context.CLIARGS['module_path']:
+ if path:
+ module_loader.add_directory(path)
+
+        # dynamically add 'canonical' modules as commands; aliases could also be used and loaded dynamically
+ self.modules = self.list_modules()
+ for module in self.modules:
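+            # bind the loop variable as a default argument so each lambda keeps
+            # its own module name rather than the loop's final value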
+ setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg))
+ setattr(self, 'help_' + module, lambda module=module: self.helpdefault(module))
+
+ (sshpass, becomepass) = self.ask_passwords()
+ self.passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
+
+ self.loader, self.inventory, self.variable_manager = self._play_prereqs()
+
+ hosts = self.get_host_list(self.inventory, context.CLIARGS['subset'], self.pattern)
+
+ self.groups = self.inventory.list_groups()
+ self.hosts = [x.name for x in hosts]
+
+ # This hack is to work around readline issues on a mac:
+ # http://stackoverflow.com/a/7116997/541202
+ if 'libedit' in readline.__doc__:
+ readline.parse_and_bind("bind ^I rl_complete")
+ else:
+ readline.parse_and_bind("tab: complete")
+
+ histfile = os.path.join(os.path.expanduser("~"), ".ansible-console_history")
+ try:
+ readline.read_history_file(histfile)
+ except IOError:
+ pass
+
+ atexit.register(readline.write_history_file, histfile)
+ self.set_prompt()
+ self.cmdloop()
+
+ def __getattr__(self, name):
+        ''' on attribute lookup failure, dynamically populate a do_/help_ function if a module matching the name exists '''
+ attr = None
+
+ if name.startswith('do_'):
+ module = name.replace('do_', '')
+ if module_loader.find_plugin(module):
+ setattr(self, name, lambda arg, module=module: self.default(module + ' ' + arg))
+ attr = object.__getattr__(self, name)
+ elif name.startswith('help_'):
+ module = name.replace('help_', '')
+ if module_loader.find_plugin(module):
+ setattr(self, name, lambda module=module: self.helpdefault(module))
+ attr = object.__getattr__(self, name)
+
+ if attr is None:
+ raise AttributeError(f"{self.__class__} does not have a {name} attribute")
+
+ return attr
+
+
+def main(args=None):
+ ConsoleCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py
new file mode 100755
index 0000000..8036530
--- /dev/null
+++ b/lib/ansible/cli/doc.py
@@ -0,0 +1,1393 @@
+#!/usr/bin/env python
+# Copyright: (c) 2014, James Tanner <tanner.jc@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import pkgutil
+import os
+import os.path
+import re
+import textwrap
+import traceback
+
+import ansible.plugins.loader as plugin_loader
+
+from pathlib import Path
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.collections.list import list_collection_dirs
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError, AnsiblePluginNotFound
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.common.json import json_dump
+from ansible.module_utils.common.yaml import yaml_dump
+from ansible.module_utils.compat import importlib
+from ansible.module_utils.six import string_types
+from ansible.parsing.plugin_docs import read_docstub
+from ansible.parsing.utils.yaml import from_yaml
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.plugins.list import list_plugins
+from ansible.plugins.loader import action_loader, fragment_loader
+from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
+from ansible.utils.display import Display
+from ansible.utils.plugin_docs import get_plugin_docs, get_docstring, get_versioned_doclink
+
+display = Display()
+
+
+TARGET_OPTIONS = C.DOCUMENTABLE_PLUGINS + ('role', 'keyword',)
+PB_OBJECTS = ['Play', 'Role', 'Block', 'Task']
+PB_LOADED = {}
+SNIPPETS = ['inventory', 'lookup', 'module']
+
+
+def add_collection_plugins(plugin_list, plugin_type, coll_filter=None):
+ display.deprecated("add_collection_plugins method, use ansible.plugins.list functions instead.", version='2.17')
+ plugin_list.update(list_plugins(plugin_type, coll_filter))
+
+
+def jdump(text):
+ try:
+ display.display(json_dump(text))
+ except TypeError as e:
+ display.vvv(traceback.format_exc())
+ raise AnsibleError('We could not convert all the documentation into JSON as there was a conversion issue: %s' % to_native(e))
+
+
+class RoleMixin(object):
+ """A mixin containing all methods relevant to role argument specification functionality.
+
+ Note: The methods for actual display of role data are not present here.
+ """
+
+ # Potential locations of the role arg spec file in the meta subdir, with main.yml
+ # having the lowest priority.
+ ROLE_ARGSPEC_FILES = ['argument_specs' + e for e in C.YAML_FILENAME_EXTENSIONS] + ["main" + e for e in C.YAML_FILENAME_EXTENSIONS]
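+    # with the default extensions this yields, e.g., ['argument_specs.yml',
+    # 'argument_specs.yaml', 'argument_specs.json', 'main.yml', 'main.yaml', 'main.json']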
+
+ def _load_argspec(self, role_name, collection_path=None, role_path=None):
+ """Load the role argument spec data from the source file.
+
+ :param str role_name: The name of the role for which we want the argspec data.
+ :param str collection_path: Path to the collection containing the role. This
+ will be None for standard roles.
+ :param str role_path: Path to the standard role. This will be None for
+ collection roles.
+
+        We support two files containing the role arg spec data: either meta/main.yml
+        or meta/argument_specs.yml. The argument_specs.yml file takes precedence
+        over the meta/main.yml file, if it exists. Data is NOT combined between the
+        two files.
+
+ :returns: A dict of all data underneath the ``argument_specs`` top-level YAML
+ key in the argspec data file. Empty dict is returned if there is no data.
+ """
+
+ if collection_path:
+ meta_path = os.path.join(collection_path, 'roles', role_name, 'meta')
+ elif role_path:
+ meta_path = os.path.join(role_path, 'meta')
+ else:
+ raise AnsibleError("A path is required to load argument specs for role '%s'" % role_name)
+
+ path = None
+
+ # Check all potential spec files
+ for specfile in self.ROLE_ARGSPEC_FILES:
+ full_path = os.path.join(meta_path, specfile)
+ if os.path.exists(full_path):
+ path = full_path
+ break
+
+ if path is None:
+ return {}
+
+ try:
+ with open(path, 'r') as f:
+ data = from_yaml(f.read(), file_name=path)
+ if data is None:
+ data = {}
+ return data.get('argument_specs', {})
+ except (IOError, OSError) as e:
+ raise AnsibleParserError("An error occurred while trying to read the file '%s': %s" % (path, to_native(e)), orig_exc=e)
+
+ def _find_all_normal_roles(self, role_paths, name_filters=None):
+ """Find all non-collection roles that have an argument spec file.
+
+ Note that argument specs do not actually need to exist within the spec file.
+
+ :param role_paths: A tuple of one or more role paths. When a role with the same name
+ is found in multiple paths, only the first-found role is returned.
+ :param name_filters: A tuple of one or more role names used to filter the results.
+
+ :returns: A set of tuples consisting of: role name, full role path
+ """
+ found = set()
+ found_names = set()
+
+ for path in role_paths:
+ if not os.path.isdir(path):
+ continue
+
+ # Check each subdir for an argument spec file
+ for entry in os.listdir(path):
+ role_path = os.path.join(path, entry)
+
+ # Check all potential spec files
+ for specfile in self.ROLE_ARGSPEC_FILES:
+ full_path = os.path.join(role_path, 'meta', specfile)
+ if os.path.exists(full_path):
+ if name_filters is None or entry in name_filters:
+ if entry not in found_names:
+ found.add((entry, role_path))
+ found_names.add(entry)
+ # select first-found
+ break
+ return found
+
+ def _find_all_collection_roles(self, name_filters=None, collection_filter=None):
+ """Find all collection roles with an argument spec file.
+
+ Note that argument specs do not actually need to exist within the spec file.
+
+ :param name_filters: A tuple of one or more role names used to filter the results. These
+ might be fully qualified with the collection name (e.g., community.general.roleA)
+ or not (e.g., roleA).
+
+ :param collection_filter: A string containing the FQCN of a collection which will be
+ used to limit results. This filter will take precedence over the name_filters.
+
+ :returns: A set of tuples consisting of: role name, collection name, collection path
+ """
+ found = set()
+ b_colldirs = list_collection_dirs(coll_filter=collection_filter)
+ for b_path in b_colldirs:
+ path = to_text(b_path, errors='surrogate_or_strict')
+ collname = _get_collection_name_from_path(b_path)
+
+ roles_dir = os.path.join(path, 'roles')
+ if os.path.exists(roles_dir):
+ for entry in os.listdir(roles_dir):
+
+ # Check all potential spec files
+ for specfile in self.ROLE_ARGSPEC_FILES:
+ full_path = os.path.join(roles_dir, entry, 'meta', specfile)
+ if os.path.exists(full_path):
+ if name_filters is None:
+ found.add((entry, collname, path))
+ else:
+ # Name filters might contain a collection FQCN or not.
+ for fqcn in name_filters:
+ if len(fqcn.split('.')) == 3:
+ (ns, col, role) = fqcn.split('.')
+ if '.'.join([ns, col]) == collname and entry == role:
+ found.add((entry, collname, path))
+ elif fqcn == entry:
+ found.add((entry, collname, path))
+ break
+ return found
+
+ def _build_summary(self, role, collection, argspec):
+ """Build a summary dict for a role.
+
+ Returns a simplified role arg spec containing only the role entry points and their
+ short descriptions, and the role collection name (if applicable).
+
+ :param role: The simple role name.
+ :param collection: The collection containing the role (None or empty string if N/A).
+ :param argspec: The complete role argspec data dict.
+
+ :returns: A tuple with the FQCN role name and a summary dict.
+ """
+ if collection:
+ fqcn = '.'.join([collection, role])
+ else:
+ fqcn = role
+ summary = {}
+ summary['collection'] = collection
+ summary['entry_points'] = {}
+ for ep in argspec.keys():
+ entry_spec = argspec[ep] or {}
+ summary['entry_points'][ep] = entry_spec.get('short_description', '')
+ return (fqcn, summary)
+
+ def _build_doc(self, role, path, collection, argspec, entry_point):
+ if collection:
+ fqcn = '.'.join([collection, role])
+ else:
+ fqcn = role
+ doc = {}
+ doc['path'] = path
+ doc['collection'] = collection
+ doc['entry_points'] = {}
+ for ep in argspec.keys():
+ if entry_point is None or ep == entry_point:
+ entry_spec = argspec[ep] or {}
+ doc['entry_points'][ep] = entry_spec
+
+ # If we didn't add any entry points (b/c of filtering), ignore this entry.
+ if len(doc['entry_points'].keys()) == 0:
+ doc = None
+
+ return (fqcn, doc)
+
+ def _create_role_list(self, fail_on_errors=True):
+ """Return a dict describing the listing of all roles with arg specs.
+
+        :param fail_on_errors: When False, include an error entry in the results instead of raising.
+
+ :returns: A dict indexed by role name, with 'collection' and 'entry_points' keys per role.
+
+ Example return:
+
+ results = {
+ 'roleA': {
+ 'collection': '',
+ 'entry_points': {
+ 'main': 'Short description for main'
+ }
+ },
+ 'a.b.c.roleB': {
+ 'collection': 'a.b.c',
+ 'entry_points': {
+ 'main': 'Short description for main',
+ 'alternate': 'Short description for alternate entry point'
+                }
+            },
+            'x.y.z.roleB': {
+ 'collection': 'x.y.z',
+ 'entry_points': {
+ 'main': 'Short description for main',
+ }
+ },
+ }
+ """
+ roles_path = self._get_roles_path()
+ collection_filter = self._get_collection_filter()
+ if not collection_filter:
+ roles = self._find_all_normal_roles(roles_path)
+ else:
+ roles = []
+ collroles = self._find_all_collection_roles(collection_filter=collection_filter)
+
+ result = {}
+
+ for role, role_path in roles:
+ try:
+ argspec = self._load_argspec(role, role_path=role_path)
+ fqcn, summary = self._build_summary(role, '', argspec)
+ result[fqcn] = summary
+ except Exception as e:
+ if fail_on_errors:
+ raise
+ result[role] = {
+ 'error': 'Error while loading role argument spec: %s' % to_native(e),
+ }
+
+ for role, collection, collection_path in collroles:
+ try:
+ argspec = self._load_argspec(role, collection_path=collection_path)
+ fqcn, summary = self._build_summary(role, collection, argspec)
+ result[fqcn] = summary
+ except Exception as e:
+ if fail_on_errors:
+ raise
+ result['%s.%s' % (collection, role)] = {
+ 'error': 'Error while loading role argument spec: %s' % to_native(e),
+ }
+
+ return result
+
+ def _create_role_doc(self, role_names, entry_point=None, fail_on_errors=True):
+ """
+ :param role_names: A tuple of one or more role names.
+ :param entry_point: A role entry point name for filtering.
+ :param fail_on_errors: When set to False, include errors in the JSON output instead of raising errors
+
+ :returns: A dict indexed by role name, with 'collection', 'entry_points', and 'path' keys per role.
+ """
+ roles_path = self._get_roles_path()
+ roles = self._find_all_normal_roles(roles_path, name_filters=role_names)
+ collroles = self._find_all_collection_roles(name_filters=role_names)
+
+ result = {}
+
+ for role, role_path in roles:
+ try:
+ argspec = self._load_argspec(role, role_path=role_path)
+ fqcn, doc = self._build_doc(role, role_path, '', argspec, entry_point)
+ if doc:
+ result[fqcn] = doc
+ except Exception as e: # pylint:disable=broad-except
+ result[role] = {
+ 'error': 'Error while processing role: %s' % to_native(e),
+ }
+
+ for role, collection, collection_path in collroles:
+ try:
+ argspec = self._load_argspec(role, collection_path=collection_path)
+ fqcn, doc = self._build_doc(role, collection_path, collection, argspec, entry_point)
+ if doc:
+ result[fqcn] = doc
+ except Exception as e: # pylint:disable=broad-except
+ result['%s.%s' % (collection, role)] = {
+ 'error': 'Error while processing role: %s' % to_native(e),
+ }
+
+ return result
+
+
+class DocCLI(CLI, RoleMixin):
+ ''' displays information on modules installed in Ansible libraries.
+ It displays a terse listing of plugins and their short descriptions,
+ provides a printout of their DOCUMENTATION strings,
+ and it can create a short "snippet" which can be pasted into a playbook. '''
+
+ name = 'ansible-doc'
+
+ # default ignore list for detailed views
+ IGNORE = ('module', 'docuri', 'version_added', 'version_added_collection', 'short_description', 'now_date', 'plainexamples', 'returndocs', 'collection')
+
+ # Warning: If you add more elements here, you also need to add it to the docsite build (in the
+ # ansible-community/antsibull repo)
+ _ITALIC = re.compile(r"\bI\(([^)]+)\)")
+ _BOLD = re.compile(r"\bB\(([^)]+)\)")
+ _MODULE = re.compile(r"\bM\(([^)]+)\)")
+ _LINK = re.compile(r"\bL\(([^)]+), *([^)]+)\)")
+ _URL = re.compile(r"\bU\(([^)]+)\)")
+ _REF = re.compile(r"\bR\(([^)]+), *([^)]+)\)")
+ _CONST = re.compile(r"\bC\(([^)]+)\)")
+ _RULER = re.compile(r"\bHORIZONTALLINE\b")
+
+ # rst specific
+ _RST_NOTE = re.compile(r".. note::")
+ _RST_SEEALSO = re.compile(r".. seealso::")
+ _RST_ROLES = re.compile(r":\w+?:`")
+ _RST_DIRECTIVES = re.compile(r".. \w+?::")
+
+ def __init__(self, args):
+
+ super(DocCLI, self).__init__(args)
+ self.plugin_list = set()
+
+ @classmethod
+ def find_plugins(cls, path, internal, plugin_type, coll_filter=None):
+ display.deprecated("find_plugins method as it is incomplete/incorrect. use ansible.plugins.list functions instead.", version='2.17')
+ return list_plugins(plugin_type, coll_filter, [path]).keys()
+
+ @classmethod
+ def tty_ify(cls, text):
+
+ # general formatting
+ t = cls._ITALIC.sub(r"`\1'", text) # I(word) => `word'
+ t = cls._BOLD.sub(r"*\1*", t) # B(word) => *word*
+ t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
+ t = cls._URL.sub(r"\1", t) # U(word) => word
+ t = cls._LINK.sub(r"\1 <\2>", t) # L(word, url) => word <url>
+ t = cls._REF.sub(r"\1", t) # R(word, sphinx-ref) => word
+ t = cls._CONST.sub(r"`\1'", t) # C(word) => `word'
+ t = cls._RULER.sub("\n{0}\n".format("-" * 13), t) # HORIZONTALLINE => -------
+
+ # remove rst
+ t = cls._RST_SEEALSO.sub(r"See also:", t) # seealso to See also:
+ t = cls._RST_NOTE.sub(r"Note:", t) # .. note:: to note:
+ t = cls._RST_ROLES.sub(r"`", t) # remove :ref: and other tags, keep tilde to match ending one
+ t = cls._RST_DIRECTIVES.sub(r"", t) # remove .. stuff:: in general
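+
+        # e.g. tty_ify("B(Copy) M(ansible.builtin.copy), see U(https://docs.ansible.com)")
+        # returns "*Copy* [ansible.builtin.copy], see https://docs.ansible.com"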
+
+ return t
+
+ def init_parser(self):
+
+        coll_filter = 'A supplied argument will be used for filtering; it can be a namespace or a full collection name.'
+
+ super(DocCLI, self).init_parser(
+ desc="plugin documentation tool",
+ epilog="See man pages for Ansible CLI options or website for tutorials https://docs.ansible.com"
+ )
+ opt_help.add_module_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+
+ # targets
+ self.parser.add_argument('args', nargs='*', help='Plugin', metavar='plugin')
+
+ self.parser.add_argument("-t", "--type", action="store", default='module', dest='type',
+ help='Choose which plugin type (defaults to "module"). '
+ 'Available plugin types are : {0}'.format(TARGET_OPTIONS),
+ choices=TARGET_OPTIONS)
+
+ # formatting
+ self.parser.add_argument("-j", "--json", action="store_true", default=False, dest='json_format',
+ help='Change output into json format.')
+
+ # TODO: warn if not used with -t roles
+ # role-specific options
+ self.parser.add_argument("-r", "--roles-path", dest='roles_path', default=C.DEFAULT_ROLES_PATH,
+ type=opt_help.unfrack_path(pathsep=True),
+ action=opt_help.PrependListAction,
+ help='The path to the directory containing your roles.')
+
+ # modifiers
+ exclusive = self.parser.add_mutually_exclusive_group()
+ # TODO: warn if not used with -t roles
+ exclusive.add_argument("-e", "--entry-point", dest="entry_point",
+ help="Select the entry point for role(s).")
+
+ # TODO: warn with --json as it is incompatible
+ exclusive.add_argument("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
+ help='Show playbook snippet for these plugin types: %s' % ', '.join(SNIPPETS))
+
+ # TODO: warn when arg/plugin is passed
+ exclusive.add_argument("-F", "--list_files", action="store_true", default=False, dest="list_files",
+ help='Show plugin names and their source files without summaries (implies --list). %s' % coll_filter)
+ exclusive.add_argument("-l", "--list", action="store_true", default=False, dest='list_dir',
+ help='List available plugins. %s' % coll_filter)
+ exclusive.add_argument("--metadata-dump", action="store_true", default=False, dest='dump',
+ help='**For internal use only** Dump json metadata for all entries, ignores other options.')
+
+ self.parser.add_argument("--no-fail-on-errors", action="store_true", default=False, dest='no_fail_on_errors',
+ help='**For internal use only** Only used for --metadata-dump. '
+ 'Do not fail on errors. Report the error message in the JSON instead.')
+
+ def post_process_args(self, options):
+ options = super(DocCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+
+ return options
+
+ def display_plugin_list(self, results):
+
+ # format for user
+ displace = max(len(x) for x in results.keys())
+ linelimit = display.columns - displace - 5
+ text = []
+ deprecated = []
+
+ # format display per option
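+        # '%-*s %-*.*s' pads the name column to the longest plugin name so the
+        # second column lines up across rows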
+ if context.CLIARGS['list_files']:
+ # list plugin file names
+ for plugin in sorted(results.keys()):
+ filename = to_native(results[plugin])
+
+ # handle deprecated for builtin/legacy
+ pbreak = plugin.split('.')
+ if pbreak[-1].startswith('_') and pbreak[0] == 'ansible' and pbreak[1] in ('builtin', 'legacy'):
+ pbreak[-1] = pbreak[-1][1:]
+ plugin = '.'.join(pbreak)
+ deprecated.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))
+ else:
+ text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))
+ else:
+ # list plugin names and short desc
+ for plugin in sorted(results.keys()):
+ desc = DocCLI.tty_ify(results[plugin])
+
+ if len(desc) > linelimit:
+ desc = desc[:linelimit] + '...'
+
+ pbreak = plugin.split('.')
+ if pbreak[-1].startswith('_'): # Handle deprecated # TODO: add mark for deprecated collection plugins
+ pbreak[-1] = pbreak[-1][1:]
+ plugin = '.'.join(pbreak)
+ deprecated.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
+ else:
+ text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
+
+ if len(deprecated) > 0:
+ text.append("\nDEPRECATED:")
+ text.extend(deprecated)
+
+ # display results
+ DocCLI.pager("\n".join(text))
+
+ def _display_available_roles(self, list_json):
+ """Display all roles we can find with a valid argument specification.
+
+ Output is: fqcn role name, entry point, short description
+ """
+ roles = list(list_json.keys())
+ entry_point_names = set()
+ for role in roles:
+ for entry_point in list_json[role]['entry_points'].keys():
+ entry_point_names.add(entry_point)
+
+ max_role_len = 0
+ max_ep_len = 0
+
+ if roles:
+ max_role_len = max(len(x) for x in roles)
+ if entry_point_names:
+ max_ep_len = max(len(x) for x in entry_point_names)
+
+ linelimit = display.columns - max_role_len - max_ep_len - 5
+ text = []
+
+ for role in sorted(roles):
+ for entry_point, desc in list_json[role]['entry_points'].items():
+ if len(desc) > linelimit:
+ desc = desc[:linelimit] + '...'
+ text.append("%-*s %-*s %s" % (max_role_len, role,
+ max_ep_len, entry_point,
+ desc))
+
+ # display results
+ DocCLI.pager("\n".join(text))
+
+ def _display_role_doc(self, role_json):
+ roles = list(role_json.keys())
+ text = []
+ for role in roles:
+ text += self.get_role_man_text(role, role_json[role])
+
+ # display results
+ DocCLI.pager("\n".join(text))
+
+ @staticmethod
+ def _list_keywords():
+ return from_yaml(pkgutil.get_data('ansible', 'keyword_desc.yml'))
+
+ @staticmethod
+ def _get_keywords_docs(keys):
+
+ data = {}
+ descs = DocCLI._list_keywords()
+ for key in keys:
+
+ if key.startswith('with_'):
+                # simplify loops, don't want to handle every with_<lookup> combo
+ keyword = 'loop'
+ elif key == 'async':
+                # because 'async' became a reserved word in Python, it is renamed internally
+ keyword = 'async_val'
+ else:
+ keyword = key
+
+ try:
+                # a missing description raises KeyError, which ends this block
+ kdata = {'description': descs[key]}
+
+ # get playbook objects for keyword and use first to get keyword attributes
+ kdata['applies_to'] = []
+ for pobj in PB_OBJECTS:
+ if pobj not in PB_LOADED:
+ obj_class = 'ansible.playbook.%s' % pobj.lower()
+ loaded_class = importlib.import_module(obj_class)
+ PB_LOADED[pobj] = getattr(loaded_class, pobj, None)
+
+ if keyword in PB_LOADED[pobj].fattributes:
+ kdata['applies_to'].append(pobj)
+
+ # we should only need these once
+ if 'type' not in kdata:
+
+ fa = PB_LOADED[pobj].fattributes.get(keyword)
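+                        # private attributes are undocumented; raising KeyError here
+                        # routes this keyword through the invalid-keyword handler below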
+ if getattr(fa, 'private'):
+ kdata = {}
+ raise KeyError
+
+ kdata['type'] = getattr(fa, 'isa', 'string')
+
+ if keyword.endswith('when') or keyword in ('until',):
+ # TODO: make this a field attribute property,
+                            # would also help with the warnings on {{}} stacking
+ kdata['template'] = 'implicit'
+ elif getattr(fa, 'static'):
+ kdata['template'] = 'static'
+ else:
+ kdata['template'] = 'explicit'
+
+ # those that require no processing
+ for visible in ('alias', 'priority'):
+ kdata[visible] = getattr(fa, visible)
+
+ # remove None keys
+ for k in list(kdata.keys()):
+ if kdata[k] is None:
+ del kdata[k]
+
+ data[key] = kdata
+
+ except (AttributeError, KeyError) as e:
+ display.warning("Skipping Invalid keyword '%s' specified: %s" % (key, to_text(e)))
+ if display.verbosity >= 3:
+ display.verbose(traceback.format_exc())
+
+ return data
+
+ def _get_collection_filter(self):
+
+ coll_filter = None
+ if len(context.CLIARGS['args']) == 1:
+ coll_filter = context.CLIARGS['args'][0]
+ if not AnsibleCollectionRef.is_valid_collection_name(coll_filter):
+ raise AnsibleError('Invalid collection name (must be of the form namespace.collection): {0}'.format(coll_filter))
+ elif len(context.CLIARGS['args']) > 1:
+ raise AnsibleOptionsError("Only a single collection filter is supported.")
+
+ return coll_filter
+
+ def _list_plugins(self, plugin_type, content):
+
+ results = {}
+ self.plugins = {}
+ loader = DocCLI._prep_loader(plugin_type)
+
+ coll_filter = self._get_collection_filter()
+ self.plugins.update(list_plugins(plugin_type, coll_filter))
+
+ # get appropriate content depending on option
+ if content == 'dir':
+ results = self._get_plugin_list_descriptions(loader)
+ elif content == 'files':
+ results = {k: self.plugins[k][0] for k in self.plugins.keys()}
+ else:
+ results = {k: {} for k in self.plugins.keys()}
+ self.plugin_list = set() # reset for next iteration
+
+ return results
+
+ def _get_plugins_docs(self, plugin_type, names, fail_ok=False, fail_on_errors=True):
+
+ loader = DocCLI._prep_loader(plugin_type)
+
+ # get the docs for plugins in the command line list
+ plugin_docs = {}
+ for plugin in names:
+ doc = {}
+ try:
+ doc, plainexamples, returndocs, metadata = get_plugin_docs(plugin, plugin_type, loader, fragment_loader, (context.CLIARGS['verbosity'] > 0))
+ except AnsiblePluginNotFound as e:
+ display.warning(to_native(e))
+ continue
+ except Exception as e:
+ if not fail_on_errors:
+ plugin_docs[plugin] = {'error': 'Missing documentation or could not parse documentation: %s' % to_native(e)}
+ continue
+ display.vvv(traceback.format_exc())
+ msg = "%s %s missing documentation (or could not parse documentation): %s\n" % (plugin_type, plugin, to_native(e))
+ if fail_ok:
+ display.warning(msg)
+ else:
+ raise AnsibleError(msg)
+
+ if not doc:
+ # The doc section existed but was empty
+ if not fail_on_errors:
+ plugin_docs[plugin] = {'error': 'No valid documentation found'}
+ continue
+
+ docs = DocCLI._combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata)
+ if not fail_on_errors:
+ # Check whether JSON serialization would break
+ try:
+ json_dump(docs)
+ except Exception as e: # pylint:disable=broad-except
+ plugin_docs[plugin] = {'error': 'Cannot serialize documentation as JSON: %s' % to_native(e)}
+ continue
+
+ plugin_docs[plugin] = docs
+
+ return plugin_docs
+
+ def _get_roles_path(self):
+ '''
+ Add any 'roles' subdir in playbook dir to the roles search path.
+ And as a last resort, add the playbook dir itself. Order being:
+ - 'roles' subdir of playbook dir
+ - DEFAULT_ROLES_PATH (default in cliargs)
+ - playbook dir (basedir)
+ NOTE: This matches logic in RoleDefinition._load_role_path() method.
+ '''
+ roles_path = context.CLIARGS['roles_path']
+ if context.CLIARGS['basedir'] is not None:
+ subdir = os.path.join(context.CLIARGS['basedir'], "roles")
+ if os.path.isdir(subdir):
+ roles_path = (subdir,) + roles_path
+ roles_path = roles_path + (context.CLIARGS['basedir'],)
+ return roles_path
+
+ @staticmethod
+ def _prep_loader(plugin_type):
+        ''' return a plugin-type-specific loader '''
+ loader = getattr(plugin_loader, '%s_loader' % plugin_type)
+
+ # add to plugin paths from command line
+ if context.CLIARGS['basedir'] is not None:
+ loader.add_directory(context.CLIARGS['basedir'], with_subdir=True)
+
+ if context.CLIARGS['module_path']:
+ for path in context.CLIARGS['module_path']:
+ if path:
+ loader.add_directory(path)
+
+ # save only top level paths for errors
+ loader._paths = None # reset so we can use subdirs later
+
+ return loader
+
+ def run(self):
+
+ super(DocCLI, self).run()
+
+ basedir = context.CLIARGS['basedir']
+ plugin_type = context.CLIARGS['type'].lower()
+ do_json = context.CLIARGS['json_format'] or context.CLIARGS['dump']
+ listing = context.CLIARGS['list_files'] or context.CLIARGS['list_dir']
+
+ if context.CLIARGS['list_files']:
+ content = 'files'
+ elif context.CLIARGS['list_dir']:
+ content = 'dir'
+ else:
+ content = None
+
+ docs = {}
+
+ if basedir:
+ AnsibleCollectionConfig.playbook_paths = basedir
+
+ if plugin_type not in TARGET_OPTIONS:
+ raise AnsibleOptionsError("Unknown or undocumentable plugin type: %s" % plugin_type)
+
+ if context.CLIARGS['dump']:
+ # we always dump all types, ignore restrictions
+ ptypes = TARGET_OPTIONS
+ docs['all'] = {}
+ for ptype in ptypes:
+
+            fail_on_errors = not context.CLIARGS['no_fail_on_errors']
+            if ptype == 'role':
+                roles = self._create_role_list(fail_on_errors=fail_on_errors)
+                docs['all'][ptype] = self._create_role_doc(roles.keys(), context.CLIARGS['entry_point'], fail_on_errors=fail_on_errors)
+ elif ptype == 'keyword':
+ names = DocCLI._list_keywords()
+ docs['all'][ptype] = DocCLI._get_keywords_docs(names.keys())
+ else:
+ plugin_names = self._list_plugins(ptype, None)
+                docs['all'][ptype] = self._get_plugins_docs(ptype, plugin_names, fail_ok=(ptype in ('test', 'filter')), fail_on_errors=fail_on_errors)
+            # reset the plugin list after each type to avoid pollution
+ elif listing:
+ if plugin_type == 'keyword':
+ docs = DocCLI._list_keywords()
+ elif plugin_type == 'role':
+ docs = self._create_role_list()
+ else:
+ docs = self._list_plugins(plugin_type, content)
+ else:
+ # here we require a name
+ if len(context.CLIARGS['args']) == 0:
+ raise AnsibleOptionsError("Missing name(s), incorrect options passed for detailed documentation.")
+
+ if plugin_type == 'keyword':
+ docs = DocCLI._get_keywords_docs(context.CLIARGS['args'])
+ elif plugin_type == 'role':
+ docs = self._create_role_doc(context.CLIARGS['args'], context.CLIARGS['entry_point'])
+ else:
+ # display specific plugin docs
+ docs = self._get_plugins_docs(plugin_type, context.CLIARGS['args'])
+
+ # Display the docs
+ if do_json:
+ jdump(docs)
+ else:
+ text = []
+ if plugin_type in C.DOCUMENTABLE_PLUGINS:
+ if listing and docs:
+ self.display_plugin_list(docs)
+ elif context.CLIARGS['show_snippet']:
+ if plugin_type not in SNIPPETS:
+ raise AnsibleError('Snippets are only available for the following plugin'
+ ' types: %s' % ', '.join(SNIPPETS))
+
+ for plugin, doc_data in docs.items():
+ try:
+ textret = DocCLI.format_snippet(plugin, plugin_type, doc_data['doc'])
+ except ValueError as e:
+ display.warning("Unable to construct a snippet for"
+ " '{0}': {1}".format(plugin, to_text(e)))
+ else:
+ text.append(textret)
+ else:
+ # Some changes to how plain text docs are formatted
+ for plugin, doc_data in docs.items():
+
+ textret = DocCLI.format_plugin_doc(plugin, plugin_type,
+ doc_data['doc'], doc_data['examples'],
+ doc_data['return'], doc_data['metadata'])
+ if textret:
+ text.append(textret)
+ else:
+ display.warning("No valid documentation was retrieved from '%s'" % plugin)
+
+ elif plugin_type == 'role':
+ if context.CLIARGS['list_dir'] and docs:
+ self._display_available_roles(docs)
+ elif docs:
+ self._display_role_doc(docs)
+
+ elif docs:
+ text = DocCLI.tty_ify(DocCLI._dump_yaml(docs))
+
+ if text:
+ DocCLI.pager(''.join(text))
+
+ return 0
+
+ @staticmethod
+ def get_all_plugins_of_type(plugin_type):
+ loader = getattr(plugin_loader, '%s_loader' % plugin_type)
+ paths = loader._get_paths_with_context()
+ plugins = {}
+ for path_context in paths:
+ plugins.update(list_plugins(plugin_type))
+ return sorted(plugins.keys())
+
+ @staticmethod
+ def get_plugin_metadata(plugin_type, plugin_name):
+ # if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
+ loader = getattr(plugin_loader, '%s_loader' % plugin_type)
+ result = loader.find_plugin_with_context(plugin_name, mod_type='.py', ignore_deprecated=True, check_aliases=True)
+ if not result.resolved:
+ raise AnsibleError("unable to load {0} plugin named {1} ".format(plugin_type, plugin_name))
+ filename = result.plugin_resolved_path
+ collection_name = result.plugin_resolved_collection
+
+ try:
+ doc, __, __, __ = get_docstring(filename, fragment_loader, verbose=(context.CLIARGS['verbosity'] > 0),
+ collection_name=collection_name, plugin_type=plugin_type)
+ except Exception:
+ display.vvv(traceback.format_exc())
+ raise AnsibleError("%s %s at %s has a documentation formatting error or is missing documentation." % (plugin_type, plugin_name, filename))
+
+ if doc is None:
+ # Removed plugins don't have any documentation
+ return None
+
+ return dict(
+ name=plugin_name,
+ namespace=DocCLI.namespace_from_plugin_filepath(filename, plugin_name, loader.package_path),
+ description=doc.get('short_description', "UNKNOWN"),
+ version_added=doc.get('version_added', "UNKNOWN")
+ )
+
+ @staticmethod
+ def namespace_from_plugin_filepath(filepath, plugin_name, basedir):
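+        # e.g. a filepath of '<base>/cloud/amazon/ec2.py' with plugin 'ec2' and
+        # basedir '<base>' yields 'cloud.amazon'; a top-level plugin yields None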
+ if not basedir.endswith('/'):
+ basedir += '/'
+ rel_path = filepath.replace(basedir, '')
+ extension_free = os.path.splitext(rel_path)[0]
+ namespace_only = extension_free.rsplit(plugin_name, 1)[0].strip('/_')
+ clean_ns = namespace_only.replace('/', '.')
+ if clean_ns == '':
+ clean_ns = None
+
+ return clean_ns
+
+ @staticmethod
+ def _combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata):
+ # generate extra data
+ if plugin_type == 'module':
+ # is there corresponding action plugin?
+ if plugin in action_loader:
+ doc['has_action'] = True
+ else:
+ doc['has_action'] = False
+
+ # return everything as one dictionary
+ return {'doc': doc, 'examples': plainexamples, 'return': returndocs, 'metadata': metadata}
+
+ @staticmethod
+ def format_snippet(plugin, plugin_type, doc):
+        ''' return a heavily commented plugin usage snippet to insert into a play '''
+ if plugin_type == 'inventory' and doc.get('options', {}).get('plugin'):
+ # these do not take a yaml config that we can write a snippet for
+            raise ValueError('The {0} inventory plugin does not take a YAML config source'
+                             ' that can be used with the "auto" plugin, so a snippet'
+                             ' cannot be created.'.format(plugin))
+
+ text = []
+
+ if plugin_type == 'lookup':
+ text = _do_lookup_snippet(doc)
+
+ elif 'options' in doc:
+ text = _do_yaml_snippet(doc)
+
+ text.append('')
+ return "\n".join(text)
+
+ @staticmethod
+ def format_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata):
+ collection_name = doc['collection']
+
+ # TODO: do we really want this?
+ # add_collection_to_versions_and_dates(doc, '(unknown)', is_module=(plugin_type == 'module'))
+ # remove_current_collection_from_versions_and_dates(doc, collection_name, is_module=(plugin_type == 'module'))
+ # remove_current_collection_from_versions_and_dates(
+ # returndocs, collection_name, is_module=(plugin_type == 'module'), return_docs=True)
+
+ # assign from other sections
+ doc['plainexamples'] = plainexamples
+ doc['returndocs'] = returndocs
+ doc['metadata'] = metadata
+
+ try:
+ text = DocCLI.get_man_text(doc, collection_name, plugin_type)
+ except Exception as e:
+ display.vvv(traceback.format_exc())
+ raise AnsibleError("Unable to retrieve documentation from '%s' due to: %s" % (plugin, to_native(e)), orig_exc=e)
+
+ return text
+
+ def _get_plugin_list_descriptions(self, loader):
+
+ descs = {}
+ for plugin in self.plugins.keys():
+ # TODO: move to plugin itself i.e: plugin.get_desc()
+ doc = None
+ filename = Path(to_native(self.plugins[plugin][0]))
+ docerror = None
+ try:
+ doc = read_docstub(filename)
+ except Exception as e:
+ docerror = e
+
+            # plugin file was empty or had an error, let's try other options
+ if doc is None:
+                # handle tests/filters that live in a file with a different name
+ base = plugin.split('.')[-1]
+ basefile = filename.with_name(base + filename.suffix)
+ for extension in C.DOC_EXTENSIONS:
+ docfile = basefile.with_suffix(extension)
+ try:
+ if docfile.exists():
+ doc = read_docstub(docfile)
+ except Exception as e:
+ docerror = e
+
+ if docerror:
+ display.warning("%s has a documentation formatting error: %s" % (plugin, docerror))
+ continue
+
+ if not doc or not isinstance(doc, dict):
+ desc = 'UNDOCUMENTED'
+ else:
+ desc = doc.get('short_description', 'INVALID SHORT DESCRIPTION').strip()
+
+ descs[plugin] = desc
+
+ return descs
+
+ @staticmethod
+ def print_paths(finder):
+        ''' Returns a string suitable for printing the search path '''
+
+ # Uses a list to get the order right
+ ret = []
+ for i in finder._get_paths(subdirs=False):
+ i = to_text(i, errors='surrogate_or_strict')
+ if i not in ret:
+ ret.append(i)
+ return os.pathsep.join(ret)
+
+ @staticmethod
+ def _dump_yaml(struct, flow_style=False):
+ return yaml_dump(struct, default_flow_style=flow_style, default_style="''", Dumper=AnsibleDumper).rstrip('\n')
+
+ @staticmethod
+ def _indent_lines(text, indent):
+ return DocCLI.tty_ify('\n'.join([indent + line for line in text.split('\n')]))
+
+ @staticmethod
+ def _format_version_added(version_added, version_added_collection=None):
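+        # e.g. ('2.10', 'ansible.builtin') -> 'version 2.10 of ansible-core',
+        # ('historical', None) -> 'historical', ('1.2.0', None) -> 'version 1.2.0'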
+ if version_added_collection == 'ansible.builtin':
+ version_added_collection = 'ansible-core'
+ # In ansible-core, version_added can be 'historical'
+ if version_added == 'historical':
+ return 'historical'
+ if version_added_collection:
+ version_added = '%s of %s' % (version_added, version_added_collection)
+ return 'version %s' % (version_added, )
+
+ @staticmethod
+ def add_fields(text, fields, limit, opt_indent, return_values=False, base_indent=''):
+
+ for o in sorted(fields):
+ # Create a copy so we don't modify the original (in case YAML anchors have been used)
+ opt = dict(fields[o])
+
+ # required is used as indicator and removed
+ required = opt.pop('required', False)
+ if not isinstance(required, bool):
+ raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
+ if required:
+ opt_leadin = "="
+ else:
+ opt_leadin = "-"
+
+ text.append("%s%s %s" % (base_indent, opt_leadin, o))
+
+            # description is specially formatted and can be either a string or a list of strings
+ if 'description' not in opt:
+ raise AnsibleError("All (sub-)options and return values must have a 'description' field")
+ if is_sequence(opt['description']):
+ for entry_idx, entry in enumerate(opt['description'], 1):
+ if not isinstance(entry, string_types):
+ raise AnsibleError("Expected string in description of %s at index %s, got %s" % (o, entry_idx, type(entry)))
+ text.append(textwrap.fill(DocCLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
+ else:
+ if not isinstance(opt['description'], string_types):
+ raise AnsibleError("Expected string in description of %s, got %s" % (o, type(opt['description'])))
+ text.append(textwrap.fill(DocCLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
+ del opt['description']
+
+ suboptions = []
+ for subkey in ('options', 'suboptions', 'contains', 'spec'):
+ if subkey in opt:
+ suboptions.append((subkey, opt.pop(subkey)))
+
+ if not required and not return_values and 'default' not in opt:
+ opt['default'] = None
+
+ # sanitize config items
+ conf = {}
+ for config in ('env', 'ini', 'yaml', 'vars', 'keyword'):
+ if config in opt and opt[config]:
+ # Create a copy so we don't modify the original (in case YAML anchors have been used)
+ conf[config] = [dict(item) for item in opt.pop(config)]
+ for ignore in DocCLI.IGNORE:
+ for item in conf[config]:
+ if ignore in item:
+ del item[ignore]
+
+            # reformat cli options
+ if 'cli' in opt and opt['cli']:
+ conf['cli'] = []
+ for cli in opt['cli']:
+ if 'option' not in cli:
+ conf['cli'].append({'name': cli['name'], 'option': '--%s' % cli['name'].replace('_', '-')})
+ else:
+ conf['cli'].append(cli)
+ del opt['cli']
+
+ # add custom header for conf
+ if conf:
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml({'set_via': conf}), opt_indent))
+
+ # these we handle at the end of generic option processing
+ version_added = opt.pop('version_added', None)
+ version_added_collection = opt.pop('version_added_collection', None)
+
+ # general processing for options
+ for k in sorted(opt):
+ if k.startswith('_'):
+ continue
+
+ if is_sequence(opt[k]):
+ text.append(DocCLI._indent_lines('%s: %s' % (k, DocCLI._dump_yaml(opt[k], flow_style=True)), opt_indent))
+ else:
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k: opt[k]}), opt_indent))
+
+ if version_added:
+ text.append("%sadded in: %s\n" % (opt_indent, DocCLI._format_version_added(version_added, version_added_collection)))
+
+ for subkey, subdata in suboptions:
+ text.append('')
+ text.append("%s%s:\n" % (opt_indent, subkey.upper()))
+ DocCLI.add_fields(text, subdata, limit, opt_indent + ' ', return_values, opt_indent)
+ if not suboptions:
+ text.append('')
+
+ def get_role_man_text(self, role, role_json):
+ '''Generate text for the supplied role suitable for display.
+
+ This is similar to get_man_text(), but roles are different enough that we have
+ a separate method for formatting their display.
+
+ :param role: The role name.
+ :param role_json: The JSON for the given role as returned from _create_role_doc().
+
+        :returns: An array of text suitable for displaying to screen.
+ '''
+ text = []
+ opt_indent = " "
+ pad = display.columns * 0.20
+ limit = max(display.columns - int(pad), 70)
+
+ text.append("> %s (%s)\n" % (role.upper(), role_json.get('path')))
+
+ for entry_point in role_json['entry_points']:
+ doc = role_json['entry_points'][entry_point]
+
+ if doc.get('short_description'):
+ text.append("ENTRY POINT: %s - %s\n" % (entry_point, doc.get('short_description')))
+ else:
+ text.append("ENTRY POINT: %s\n" % entry_point)
+
+ if doc.get('description'):
+ if isinstance(doc['description'], list):
+ desc = " ".join(doc['description'])
+ else:
+ desc = doc['description']
+
+ text.append("%s\n" % textwrap.fill(DocCLI.tty_ify(desc),
+ limit, initial_indent=opt_indent,
+ subsequent_indent=opt_indent))
+ if doc.get('options'):
+ text.append("OPTIONS (= is mandatory):\n")
+ DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent)
+ text.append('')
+
+ if doc.get('attributes'):
+ text.append("ATTRIBUTES:\n")
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc.pop('attributes')), opt_indent))
+ text.append('')
+
+ # generic elements we will handle identically
+ for k in ('author',):
+ if k not in doc:
+ continue
+ if isinstance(doc[k], string_types):
+ text.append('%s: %s' % (k.upper(), textwrap.fill(DocCLI.tty_ify(doc[k]),
+ limit - (len(k) + 2), subsequent_indent=opt_indent)))
+ elif isinstance(doc[k], (list, tuple)):
+ text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
+ else:
+                # use empty indent since this affects the start of the yaml doc, not its keys
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k.upper(): doc[k]}), ''))
+ text.append('')
+
+ return text
+
+ @staticmethod
+ def get_man_text(doc, collection_name='', plugin_type=''):
+ # Create a copy so we don't modify the original
+ doc = dict(doc)
+
+ DocCLI.IGNORE = DocCLI.IGNORE + (context.CLIARGS['type'],)
+ opt_indent = " "
+ text = []
+ pad = display.columns * 0.20
+ limit = max(display.columns - int(pad), 70)
+
+ plugin_name = doc.get(context.CLIARGS['type'], doc.get('name')) or doc.get('plugin_type') or plugin_type
+ if collection_name:
+ plugin_name = '%s.%s' % (collection_name, plugin_name)
+
+ text.append("> %s (%s)\n" % (plugin_name.upper(), doc.pop('filename')))
+
+ if isinstance(doc['description'], list):
+ desc = " ".join(doc.pop('description'))
+ else:
+ desc = doc.pop('description')
+
+ text.append("%s\n" % textwrap.fill(DocCLI.tty_ify(desc), limit, initial_indent=opt_indent,
+ subsequent_indent=opt_indent))
+
+ if 'version_added' in doc:
+ version_added = doc.pop('version_added')
+ version_added_collection = doc.pop('version_added_collection', None)
+ text.append("ADDED IN: %s\n" % DocCLI._format_version_added(version_added, version_added_collection))
+
+ if doc.get('deprecated', False):
+ text.append("DEPRECATED: \n")
+ if isinstance(doc['deprecated'], dict):
+ if 'removed_at_date' in doc['deprecated']:
+ text.append(
+ "\tReason: %(why)s\n\tWill be removed in a release after %(removed_at_date)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated')
+ )
+ else:
+ if 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
+ doc['deprecated']['removed_in'] = doc['deprecated']['version']
+ text.append("\tReason: %(why)s\n\tWill be removed in: Ansible %(removed_in)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated'))
+ else:
+ text.append("%s" % doc.pop('deprecated'))
+ text.append("\n")
+
+ if doc.pop('has_action', False):
+ text.append(" * note: %s\n" % "This module has a corresponding action plugin.")
+
+ if doc.get('options', False):
+ text.append("OPTIONS (= is mandatory):\n")
+ DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent)
+ text.append('')
+
+ if doc.get('attributes', False):
+ text.append("ATTRIBUTES:\n")
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc.pop('attributes')), opt_indent))
+ text.append('')
+
+ if doc.get('notes', False):
+ text.append("NOTES:")
+ for note in doc['notes']:
+ text.append(textwrap.fill(DocCLI.tty_ify(note), limit - 6,
+ initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ text.append('')
+ text.append('')
+ del doc['notes']
+
+ if doc.get('seealso', False):
+ text.append("SEE ALSO:")
+ for item in doc['seealso']:
+ if 'module' in item:
+ text.append(textwrap.fill(DocCLI.tty_ify('Module %s' % item['module']),
+ limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ description = item.get('description', 'The official documentation on the %s module.' % item['module'])
+ text.append(textwrap.fill(DocCLI.tty_ify(description), limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ text.append(textwrap.fill(DocCLI.tty_ify(get_versioned_doclink('modules/%s_module.html' % item['module'])),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent))
+ elif 'name' in item and 'link' in item and 'description' in item:
+ text.append(textwrap.fill(DocCLI.tty_ify(item['name']),
+ limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ text.append(textwrap.fill(DocCLI.tty_ify(item['description']),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ text.append(textwrap.fill(DocCLI.tty_ify(item['link']),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ elif 'ref' in item and 'description' in item:
+ text.append(textwrap.fill(DocCLI.tty_ify('Ansible documentation [%s]' % item['ref']),
+ limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ text.append(textwrap.fill(DocCLI.tty_ify(item['description']),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ text.append(textwrap.fill(DocCLI.tty_ify(get_versioned_doclink('/#stq=%s&stp=1' % item['ref'])),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+
+ text.append('')
+ text.append('')
+ del doc['seealso']
+
+ if doc.get('requirements', False):
+ req = ", ".join(doc.pop('requirements'))
+ text.append("REQUIREMENTS:%s\n" % textwrap.fill(DocCLI.tty_ify(req), limit - 16, initial_indent=" ", subsequent_indent=opt_indent))
+
+ # Generic handler
+ for k in sorted(doc):
+ if k in DocCLI.IGNORE or not doc[k]:
+ continue
+ if isinstance(doc[k], string_types):
+ text.append('%s: %s' % (k.upper(), textwrap.fill(DocCLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
+ elif isinstance(doc[k], (list, tuple)):
+ text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
+ else:
+                # use empty indent since this affects the start of the yaml doc, not its keys
+ text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k.upper(): doc[k]}), ''))
+ del doc[k]
+ text.append('')
+
+ if doc.get('plainexamples', False):
+ text.append("EXAMPLES:")
+ text.append('')
+ if isinstance(doc['plainexamples'], string_types):
+ text.append(doc.pop('plainexamples').strip())
+ else:
+ try:
+ text.append(yaml_dump(doc.pop('plainexamples'), indent=2, default_flow_style=False))
+ except Exception as e:
+ raise AnsibleParserError("Unable to parse examples section", orig_exc=e)
+ text.append('')
+ text.append('')
+
+ if doc.get('returndocs', False):
+ text.append("RETURN VALUES:")
+ DocCLI.add_fields(text, doc.pop('returndocs'), limit, opt_indent, return_values=True)
+
+ return "\n".join(text)
+
+
+def _do_yaml_snippet(doc):
+ text = []
+
+ mdesc = DocCLI.tty_ify(doc['short_description'])
+ module = doc.get('module')
+
+ if module:
+ # this is actually a usable task!
+ text.append("- name: %s" % (mdesc))
+ text.append(" %s:" % (module))
+ else:
+ # just a comment; hopefully still a useful yaml file
+ text.append("# %s:" % doc.get('plugin', doc.get('name')))
+
+ pad = 29
+ subdent = '# '.rjust(pad + 2)
+ limit = display.columns - pad
+
+ for o in sorted(doc['options'].keys()):
+ opt = doc['options'][o]
+ if isinstance(opt['description'], string_types):
+ desc = DocCLI.tty_ify(opt['description'])
+ else:
+ desc = DocCLI.tty_ify(" ".join(opt['description']))
+
+ required = opt.get('required', False)
+ if not isinstance(required, bool):
+ raise ValueError("Incorrect value for 'required', a boolean is needed: %s" % required)
+
+ o = '%s:' % o
+ if module:
+ if required:
+ desc = "(required) %s" % desc
+ text.append(" %-20s # %s" % (o, textwrap.fill(desc, limit, subsequent_indent=subdent)))
+ else:
+ if required:
+ default = '(required)'
+ else:
+ default = opt.get('default', 'None')
+
+ text.append("%s %-9s # %s" % (o, default, textwrap.fill(desc, limit, subsequent_indent=subdent, max_lines=3)))
+
+ return text
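+
+# For a module doc this emits a usable task skeleton, roughly (module and
+# option names illustrative):
+#
+#   - name: Manage the foo service
+#     foo:
+#       state:               # (required) Desired state of foo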
+
+
+def _do_lookup_snippet(doc):
+ text = []
+ snippet = "lookup('%s', " % doc.get('plugin', doc.get('name'))
+ comment = []
+
+ for o in sorted(doc['options'].keys()):
+
+ opt = doc['options'][o]
+ comment.append('# %s(%s): %s' % (o, opt.get('type', 'string'), opt.get('description', '')))
+ if o in ('_terms', '_raw', '_list'):
+ # these are 'list of arguments'
+ snippet += '< %s >' % (o)
+ continue
+
+ required = opt.get('required', False)
+ if not isinstance(required, bool):
+ raise ValueError("Incorrect value for 'required', a boolean is needed: %s" % required)
+
+ if required:
+ default = '<REQUIRED>'
+ else:
+ default = opt.get('default', 'None')
+
+ if opt.get('type') in ('string', 'str'):
+ snippet += ", %s='%s'" % (o, default)
+ else:
+ snippet += ', %s=%s' % (o, default)
+
+ snippet += ")"
+
+ if comment:
+ text.extend(comment)
+ text.append('')
+ text.append(snippet)
+
+ return text
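+
+# For a lookup doc this emits a template-style call, roughly (plugin and
+# option names illustrative):
+#
+#   # _terms(string): the terms to look up
+#   # wantlist(boolean): return results as a list
+#   lookup('mylookup', < _terms >, wantlist=None)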
+
+
+def main(args=None):
+ DocCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
new file mode 100755
index 0000000..3cb7fe2
--- /dev/null
+++ b/lib/ansible/cli/galaxy.py
@@ -0,0 +1,1865 @@
+#!/usr/bin/env python
+# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
+# Copyright: (c) 2018-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import json
+import os.path
+import re
+import shutil
+import sys
+import textwrap
+import time
+import typing as t
+
+from dataclasses import dataclass
+from yaml.error import YAMLError
+
+import ansible.constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
+from ansible.galaxy.api import GalaxyAPI
+from ansible.galaxy.collection import (
+ build_collection,
+ download_collections,
+ find_existing_collections,
+ install_collections,
+ publish_collection,
+ validate_collection_name,
+ validate_collection_path,
+ verify_collections,
+ SIGNATURE_COUNT_RE,
+)
+from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+)
+from ansible.galaxy.collection.gpg import GPG_ERROR_MAP
+from ansible.galaxy.dependency_resolution.dataclasses import Requirement
+
+from ansible.galaxy.role import GalaxyRole
+from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from ansible.module_utils.common.collections import is_iterable
+from ansible.module_utils.common.yaml import yaml_dump, yaml_load
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils import six
+from ansible.parsing.dataloader import DataLoader
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.playbook.role.requirement import RoleRequirement
+from ansible.template import Templar
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.display import Display
+from ansible.utils.plugin_docs import get_versioned_doclink
+
+display = Display()
+urlparse = six.moves.urllib.parse.urlparse
+
+# config definition by position: name, required, type
+SERVER_DEF = [
+ ('url', True, 'str'),
+ ('username', False, 'str'),
+ ('password', False, 'str'),
+ ('token', False, 'str'),
+ ('auth_url', False, 'str'),
+ ('v3', False, 'bool'),
+ ('validate_certs', False, 'bool'),
+ ('client_id', False, 'str'),
+ ('timeout', False, 'int'),
+]
+
+# config definition fields
+SERVER_ADDITIONAL = {
+ 'v3': {'default': 'False'},
+ 'validate_certs': {'cli': [{'name': 'validate_certs'}]},
+ 'timeout': {'default': '60', 'cli': [{'name': 'timeout'}]},
+ 'token': {'default': None},
+}
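+
+# Each server in GALAXY_SERVER_LIST gets these settings resolved from an
+# ansible.cfg section or environment variables (see server_config_def in
+# GalaxyCLI.run), e.g. for a server named 'release_galaxy' (name illustrative):
+#
+#   [galaxy_server.release_galaxy]
+#   url=https://galaxy.ansible.com/
+#   token=my_token
+#
+# or ANSIBLE_GALAXY_SERVER_RELEASE_GALAXY_URL / ..._TOKEN in the environment.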
+
+
+def with_collection_artifacts_manager(wrapped_method):
+ """Inject an artifacts manager if not passed explicitly.
+
+ This decorator constructs a ConcreteArtifactsManager and maintains
+ the related temporary directory auto-cleanup around the target
+ method invocation.
+ """
+ def method_wrapper(*args, **kwargs):
+ if 'artifacts_manager' in kwargs:
+ return wrapped_method(*args, **kwargs)
+
+ # FIXME: use validate_certs context from Galaxy servers when downloading collections
+ artifacts_manager_kwargs = {'validate_certs': context.CLIARGS['resolved_validate_certs']}
+
+ keyring = context.CLIARGS.get('keyring', None)
+ if keyring is not None:
+ artifacts_manager_kwargs.update({
+ 'keyring': GalaxyCLI._resolve_path(keyring),
+ 'required_signature_count': context.CLIARGS.get('required_valid_signature_count', None),
+ 'ignore_signature_errors': context.CLIARGS.get('ignore_gpg_errors', None),
+ })
+
+ with ConcreteArtifactsManager.under_tmpdir(
+ C.DEFAULT_LOCAL_TMP,
+ **artifacts_manager_kwargs
+ ) as concrete_artifact_cm:
+ kwargs['artifacts_manager'] = concrete_artifact_cm
+ return wrapped_method(*args, **kwargs)
+ return method_wrapper
+
+
+def _display_header(path, h1, h2, w1=10, w2=7):
+ display.display('\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
+ path,
+ h1,
+ h2,
+ '-' * max([len(h1), w1]), # Make sure that the number of dashes is at least the width of the header
+ '-' * max([len(h2), w2]),
+ cwidth=w1,
+ vwidth=w2,
+ ))
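+
+
+# e.g. _display_header('/tmp/collections', 'Collection', 'Version') prints:
+#
+# # /tmp/collections
+# Collection Version
+# ---------- -------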
+
+
+def _display_role(gr):
+ install_info = gr.install_info
+ version = None
+ if install_info:
+ version = install_info.get("version", None)
+ if not version:
+ version = "(unknown version)"
+ display.display("- %s, %s" % (gr.name, version))
+
+
+def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
+ display.display('{fqcn:{cwidth}} {version:{vwidth}}'.format(
+ fqcn=to_text(collection.fqcn),
+ version=collection.ver,
+ cwidth=max(cwidth, min_cwidth), # Make sure the width isn't smaller than the header
+ vwidth=max(vwidth, min_vwidth)
+ ))
+
+
+def _get_collection_widths(collections):
+ if not is_iterable(collections):
+ collections = (collections, )
+
+ fqcn_set = {to_text(c.fqcn) for c in collections}
+ version_set = {to_text(c.ver) for c in collections}
+
+ fqcn_length = len(max(fqcn_set, key=len))
+ version_length = len(max(version_set, key=len))
+
+ return fqcn_length, version_length
+
+
+def validate_signature_count(value):
+ match = re.match(SIGNATURE_COUNT_RE, value)
+
+ if match is None:
+ raise ValueError(f"{value} is not a valid signature count value")
+
+ return value
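+
+
+# Values accepted by SIGNATURE_COUNT_RE include e.g. '2', 'all', '+2' and
+# '+all'; a leading '+' means verification fails if no valid signatures are
+# found (see the --required-valid-signature-count help text below).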
+
+
+@dataclass
+class RoleDistributionServer:
+ _api: t.Union[GalaxyAPI, None]
+ api_servers: list[GalaxyAPI]
+
+ @property
+ def api(self):
+ if self._api:
+ return self._api
+
+ for server in self.api_servers:
+ try:
+ if u'v1' in server.available_api_versions:
+ self._api = server
+ break
+ except Exception:
+ continue
+
+ if not self._api:
+ self._api = self.api_servers[0]
+
+ return self._api
+
+
+class GalaxyCLI(CLI):
+ '''Command to manage Ansible roles and collections.
+
+ None of the CLI tools are designed to run concurrently with themselves.
+ Use an external scheduler and/or locking to ensure there are no clashing operations.
+ '''
+
+ name = 'ansible-galaxy'
+
+ SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
+
+ def __init__(self, args):
+ self._raw_args = args
+ self._implicit_role = False
+
+ if len(args) > 1:
+ # Inject 'role' into sys.argv[1] as a backwards compatibility step, so that
+ # e.g. 'ansible-galaxy install foo' behaves like 'ansible-galaxy role install foo'
+ if args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
+ # TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice?
+ args.insert(1, 'role')
+ self._implicit_role = True
+ # since argparse doesn't allow hidden subparsers, handle dead login arg from raw args after "role" normalization
+ if args[1:3] == ['role', 'login']:
+ display.error(
+ "The login command was removed in late 2020. An API key is now required to publish roles or collections "
+ "to Galaxy. The key can be found at https://galaxy.ansible.com/me/preferences, and passed to the "
+ "ansible-galaxy CLI via a file at {0} or (insecurely) via the `--token` "
+ "command-line argument.".format(to_text(C.GALAXY_TOKEN_PATH)))
+ sys.exit(1)
+
+ self.api_servers = []
+ self.galaxy = None
+ self.lazy_role_api = None
+ super(GalaxyCLI, self).__init__(args)
+
+ def init_parser(self):
+ ''' create an options parser for bin/ansible-galaxy '''
+
+ super(GalaxyCLI, self).init_parser(
+ desc="Perform various Role and Collection related operations.",
+ )
+
+ # Common arguments that apply to more than 1 action
+ common = opt_help.argparse.ArgumentParser(add_help=False)
+ common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
+ common.add_argument('--token', '--api-key', dest='api_key',
+ help='The Ansible Galaxy API key which can be found at '
+ 'https://galaxy.ansible.com/me/preferences.')
+ common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs', help='Ignore SSL certificate validation errors.', default=None)
+ common.add_argument('--timeout', dest='timeout', type=int,
+ help="The time to wait for operations against the galaxy server, defaults to 60s.")
+
+ opt_help.add_verbosity_options(common)
+
+ force = opt_help.argparse.ArgumentParser(add_help=False)
+ force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
+ help='Force overwriting an existing role or collection')
+
+ github = opt_help.argparse.ArgumentParser(add_help=False)
+ github.add_argument('github_user', help='GitHub username')
+ github.add_argument('github_repo', help='GitHub repository')
+
+ offline = opt_help.argparse.ArgumentParser(add_help=False)
+ offline.add_argument('--offline', dest='offline', default=False, action='store_true',
+ help="Don't query the galaxy API when creating roles")
+
+ default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
+ roles_path = opt_help.argparse.ArgumentParser(add_help=False)
+ roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
+ default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
+ help='The path to the directory containing your roles. The default is the first '
+ 'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
+
+ collections_path = opt_help.argparse.ArgumentParser(add_help=False)
+ collections_path.add_argument('-p', '--collections-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
+ default=AnsibleCollectionConfig.collection_paths,
+ action=opt_help.PrependListAction,
+ help="One or more directories to search for collections in addition "
+ "to the default COLLECTIONS_PATHS. Separate multiple paths "
+ "with '{0}'.".format(os.path.pathsep))
+
+ cache_options = opt_help.argparse.ArgumentParser(add_help=False)
+ cache_options.add_argument('--clear-response-cache', dest='clear_response_cache', action='store_true',
+ default=False, help='Clear the existing server response cache.')
+ cache_options.add_argument('--no-cache', dest='no_cache', action='store_true', default=False,
+ help='Do not use the server response cache.')
+
+ # Add sub parser for the Galaxy role type (role or collection)
+ type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
+ type_parser.required = True
+
+ # Add sub parser for the Galaxy collection actions
+ collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
+ collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
+ collection_parser.required = True
+ self.add_download_options(collection_parser, parents=[common, cache_options])
+ self.add_init_options(collection_parser, parents=[common, force])
+ self.add_build_options(collection_parser, parents=[common, force])
+ self.add_publish_options(collection_parser, parents=[common])
+ self.add_install_options(collection_parser, parents=[common, force, cache_options])
+ self.add_list_options(collection_parser, parents=[common, collections_path])
+ self.add_verify_options(collection_parser, parents=[common, collections_path])
+
+ # Add sub parser for the Galaxy role actions
+ role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
+ role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
+ role_parser.required = True
+ self.add_init_options(role_parser, parents=[common, force, offline])
+ self.add_remove_options(role_parser, parents=[common, roles_path])
+ self.add_delete_options(role_parser, parents=[common, github])
+ self.add_list_options(role_parser, parents=[common, roles_path])
+ self.add_search_options(role_parser, parents=[common])
+ self.add_import_options(role_parser, parents=[common, github])
+ self.add_setup_options(role_parser, parents=[common, roles_path])
+
+ self.add_info_options(role_parser, parents=[common, roles_path, offline])
+ self.add_install_options(role_parser, parents=[common, force, roles_path])
+
+ def add_download_options(self, parser, parents=None):
+ download_parser = parser.add_parser('download', parents=parents,
+ help='Download collections and their dependencies as a tarball for an '
+ 'offline install.')
+ download_parser.set_defaults(func=self.execute_download)
+
+ download_parser.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
+
+ download_parser.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
+ help="Don't download collection(s) listed as dependencies.")
+
+ download_parser.add_argument('-p', '--download-path', dest='download_path',
+ default='./collections',
+ help='The directory to download the collections to.')
+ download_parser.add_argument('-r', '--requirements-file', dest='requirements',
+ help='A file containing a list of collections to be downloaded.')
+ download_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
+ help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
+
+ def add_init_options(self, parser, parents=None):
+ galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
+
+ init_parser = parser.add_parser('init', parents=parents,
+ help='Initialize a new {0} with the base structure of a '
+ '{0}.'.format(galaxy_type))
+ init_parser.set_defaults(func=self.execute_init)
+
+ init_parser.add_argument('--init-path', dest='init_path', default='./',
+ help='The path in which the skeleton {0} will be created. The default is the '
+ 'current working directory.'.format(galaxy_type))
+ init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
+ default=C.GALAXY_COLLECTION_SKELETON if galaxy_type == 'collection' else C.GALAXY_ROLE_SKELETON,
+ help='The path to a {0} skeleton that the new {0} should be based '
+ 'upon.'.format(galaxy_type))
+
+ obj_name_kwargs = {}
+ if galaxy_type == 'collection':
+ obj_name_kwargs['type'] = validate_collection_name
+ init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
+ **obj_name_kwargs)
+
+ if galaxy_type == 'role':
+ init_parser.add_argument('--type', dest='role_type', action='store', default='default',
+ help="Initialize using an alternate role type. Valid types include: 'container', "
+ "'apb' and 'network'.")
+
+ def add_remove_options(self, parser, parents=None):
+ remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
+ remove_parser.set_defaults(func=self.execute_remove)
+
+ remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
+
+ def add_delete_options(self, parser, parents=None):
+ delete_parser = parser.add_parser('delete', parents=parents,
+ help='Removes the role from Galaxy. It does not remove or alter the actual '
+ 'GitHub repository.')
+ delete_parser.set_defaults(func=self.execute_delete)
+
+ def add_list_options(self, parser, parents=None):
+ galaxy_type = 'role'
+ if parser.metavar == 'COLLECTION_ACTION':
+ galaxy_type = 'collection'
+
+ list_parser = parser.add_parser('list', parents=parents,
+ help='Show the name and version of each {0} installed in the {0}s_path.'.format(galaxy_type))
+
+ list_parser.set_defaults(func=self.execute_list)
+
+ list_parser.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
+
+ if galaxy_type == 'collection':
+ list_parser.add_argument('--format', dest='output_format', choices=('human', 'yaml', 'json'), default='human',
+ help="Format to display the list of collections in.")
+
+ def add_search_options(self, parser, parents=None):
+ search_parser = parser.add_parser('search', parents=parents,
+ help='Search the Galaxy database by tags, platforms, author and multiple '
+ 'keywords.')
+ search_parser.set_defaults(func=self.execute_search)
+
+ search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
+ search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
+ search_parser.add_argument('--author', dest='author', help='GitHub username')
+ search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
+
+ def add_import_options(self, parser, parents=None):
+ import_parser = parser.add_parser('import', parents=parents, help='Import a role into a galaxy server')
+ import_parser.set_defaults(func=self.execute_import)
+
+ import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
+ help="Don't wait for import results.")
+ import_parser.add_argument('--branch', dest='reference',
+ help='The name of a branch to import. Defaults to the repository\'s default branch '
+ '(usually master)')
+ import_parser.add_argument('--role-name', dest='role_name',
+ help='The name the role should have, if different than the repo name')
+ import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
+ help='Check the status of the most recent import request for given github_'
+ 'user/github_repo.')
+
+ def add_setup_options(self, parser, parents=None):
+ setup_parser = parser.add_parser('setup', parents=parents,
+ help='Manage the integration between Galaxy and the given source.')
+ setup_parser.set_defaults(func=self.execute_setup)
+
+ setup_parser.add_argument('--remove', dest='remove_id', default=None,
+ help='Remove the integration matching the provided ID value. Use --list to see '
+ 'ID values.')
+ setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
+ help='List all of your integrations.')
+ setup_parser.add_argument('source', help='Source')
+ setup_parser.add_argument('github_user', help='GitHub username')
+ setup_parser.add_argument('github_repo', help='GitHub repository')
+ setup_parser.add_argument('secret', help='Secret')
+
+ def add_info_options(self, parser, parents=None):
+ info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
+ info_parser.set_defaults(func=self.execute_info)
+
+ info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
+
+ def add_verify_options(self, parser, parents=None):
+ galaxy_type = 'collection'
+ verify_parser = parser.add_parser('verify', parents=parents, help='Compare checksums of the installed collection(s) '
+ 'against those found on the server. This does not verify dependencies.')
+ verify_parser.set_defaults(func=self.execute_verify)
+
+ verify_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', help='The installed collection(s) name. '
+ 'This is mutually exclusive with --requirements-file.')
+ verify_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
+ help='Ignore errors during verification and continue with the next specified collection.')
+ verify_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
+ help='Validate collection integrity locally without contacting server for '
+ 'canonical manifest hash.')
+ verify_parser.add_argument('-r', '--requirements-file', dest='requirements',
+ help='A file containing a list of collections to be verified.')
+ verify_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
+ help='The keyring used during signature verification') # Eventually default to ~/.ansible/pubring.kbx?
+ verify_parser.add_argument('--signature', dest='signatures', action='append',
+ help='An additional signature source to verify the authenticity of the MANIFEST.json before using '
+ 'it to verify the rest of the contents of a collection from a Galaxy server. Use in '
+ 'conjunction with a positional collection name (mutually exclusive with --requirements-file).')
+ valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
+ 'or all to signify that all signatures must be used to verify the collection. ' \
+ 'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
+ ignore_gpg_status_help = 'A status code to ignore during signature verification (for example, NO_PUBKEY). ' \
+ 'Provide this option multiple times to ignore a list of status codes. ' \
+ 'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).'
+ verify_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
+ help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
+ verify_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
+ help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
+ choices=list(GPG_ERROR_MAP.keys()))
+
+ def add_install_options(self, parser, parents=None):
+ galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
+
+ args_kwargs = {}
+ if galaxy_type == 'collection':
+ args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
+ 'mutually exclusive with --requirements-file.'
+ ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
+ 'collection. This will not ignore dependency conflict errors.'
+ else:
+ args_kwargs['help'] = 'Role name, URL or tar file'
+ ignore_errors_help = 'Ignore errors and continue with the next specified role.'
+
+ install_parser = parser.add_parser('install', parents=parents,
+ help='Install {0}(s) from file(s), URL(s) or Ansible '
+ 'Galaxy'.format(galaxy_type))
+ install_parser.set_defaults(func=self.execute_install)
+
+ install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
+ install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
+ help=ignore_errors_help)
+
+ install_exclusive = install_parser.add_mutually_exclusive_group()
+ install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
+ help="Don't download {0}s listed as dependencies.".format(galaxy_type))
+ install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
+ help="Force overwriting an existing {0} and its "
+ "dependencies.".format(galaxy_type))
+
+ valid_signature_count_help = 'The number of signatures that must successfully verify the collection. This should be a positive integer ' \
+ 'or all to signify that all signatures must be used to verify the collection. ' \
+ 'Prepend the value with + to fail if no valid signatures are found for the collection (e.g. +all).'
+ ignore_gpg_status_help = 'A status code to ignore during signature verification (for example, NO_PUBKEY). ' \
+ 'Provide this option multiple times to ignore a list of status codes. ' \
+ 'Descriptions for the choices can be seen at L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes).'
+
+ if galaxy_type == 'collection':
+ install_parser.add_argument('-p', '--collections-path', dest='collections_path',
+ default=self._get_default_collection_path(),
+ help='The path to the directory containing your collections.')
+ install_parser.add_argument('-r', '--requirements-file', dest='requirements',
+ help='A file containing a list of collections to be installed.')
+ install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
+ help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
+ install_parser.add_argument('-U', '--upgrade', dest='upgrade', action='store_true', default=False,
+ help='Upgrade installed collection artifacts. This will also update dependencies unless --no-deps is provided')
+ install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
+ help='The keyring used during signature verification') # Eventually default to ~/.ansible/pubring.kbx?
+ install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
+ default=C.GALAXY_DISABLE_GPG_VERIFY,
+ help='Disable GPG signature verification when installing collections from a Galaxy server')
+ install_parser.add_argument('--signature', dest='signatures', action='append',
+ help='An additional signature source to verify the authenticity of the MANIFEST.json before '
+ 'installing the collection from a Galaxy server. Use in conjunction with a positional '
+ 'collection name (mutually exclusive with --requirements-file).')
+ install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
+ help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
+ install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
+ help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
+ choices=list(GPG_ERROR_MAP.keys()))
+ install_parser.add_argument('--offline', dest='offline', action='store_true', default=False,
+ help='Install collection artifacts (tarballs) without contacting any distribution servers. '
+ 'This does not apply to collections in remote Git repositories or URLs to remote tarballs.'
+ )
+ else:
+ install_parser.add_argument('-r', '--role-file', dest='requirements',
+ help='A file containing a list of roles to be installed.')
+
+ r_re = re.compile(r'^(?<!-)-[a-zA-Z]*r[a-zA-Z]*') # -r, -fr
+ contains_r = bool([a for a in self._raw_args if r_re.match(a)])
+ role_file_re = re.compile(r'--role-file($|=)') # --role-file foo, --role-file=foo
+ contains_role_file = bool([a for a in self._raw_args if role_file_re.match(a)])
+ if self._implicit_role and (contains_r or contains_role_file):
+ # Any collections in the requirements files will also be installed
+ install_parser.add_argument('--keyring', dest='keyring', default=C.GALAXY_GPG_KEYRING,
+ help='The keyring used during collection signature verification')
+ install_parser.add_argument('--disable-gpg-verify', dest='disable_gpg_verify', action='store_true',
+ default=C.GALAXY_DISABLE_GPG_VERIFY,
+ help='Disable GPG signature verification when installing collections from a Galaxy server')
+ install_parser.add_argument('--required-valid-signature-count', dest='required_valid_signature_count', type=validate_signature_count,
+ help=valid_signature_count_help, default=C.GALAXY_REQUIRED_VALID_SIGNATURE_COUNT)
+ install_parser.add_argument('--ignore-signature-status-code', dest='ignore_gpg_errors', type=str, action='append',
+ help=ignore_gpg_status_help, default=C.GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES,
+ choices=list(GPG_ERROR_MAP.keys()))
+
+ install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
+ default=False,
+ help='Use tar instead of the scm archive option when packaging the role.')
+
+ def add_build_options(self, parser, parents=None):
+ build_parser = parser.add_parser('build', parents=parents,
+ help='Build an Ansible collection artifact that can be published to Ansible '
+ 'Galaxy.')
+ build_parser.set_defaults(func=self.execute_build)
+
+ build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
+ help='Path to the collection(s) directory to build. This should be the directory '
+ 'that contains the galaxy.yml file. The default is the current working '
+ 'directory.')
+ build_parser.add_argument('--output-path', dest='output_path', default='./',
+ help='The path in which the collection is built. The default is the current '
+ 'working directory.')
+
+ def add_publish_options(self, parser, parents=None):
+ publish_parser = parser.add_parser('publish', parents=parents,
+ help='Publish a collection artifact to Ansible Galaxy.')
+ publish_parser.set_defaults(func=self.execute_publish)
+
+ publish_parser.add_argument('args', metavar='collection_path',
+ help='The path to the collection tarball to publish.')
+ publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
+ help="Don't wait for import validation results.")
+ publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
+ help="The time to wait for the collection import process to finish.")
+
+ def post_process_args(self, options):
+ options = super(GalaxyCLI, self).post_process_args(options)
+
+ # ensure we have 'usable' cli option
+ setattr(options, 'validate_certs', (None if options.ignore_certs is None else not options.ignore_certs))
+ # the default if validate_certs is None
+ setattr(options, 'resolved_validate_certs', (options.validate_certs if options.validate_certs is not None else not C.GALAXY_IGNORE_CERTS))
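+ # e.g. --ignore-certs gives validate_certs=False and resolved_validate_certs=False;
+ # omitting it gives validate_certs=None, so resolution falls back to (not GALAXY_IGNORE_CERTS)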
+
+ display.verbosity = options.verbosity
+ return options
+
+ def run(self):
+
+ super(GalaxyCLI, self).run()
+
+ self.galaxy = Galaxy()
+
+ def server_config_def(section, key, required, option_type):
+ config_def = {
+ 'description': 'The %s of the %s Galaxy server' % (key, section),
+ 'ini': [
+ {
+ 'section': 'galaxy_server.%s' % section,
+ 'key': key,
+ }
+ ],
+ 'env': [
+ {'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
+ ],
+ 'required': required,
+ 'type': option_type,
+ }
+ if key in SERVER_ADDITIONAL:
+ config_def.update(SERVER_ADDITIONAL[key])
+
+ return config_def
+
+ galaxy_options = {}
+ for optional_key in ['clear_response_cache', 'no_cache', 'timeout']:
+ if optional_key in context.CLIARGS:
+ galaxy_options[optional_key] = context.CLIARGS[optional_key]
+
+ config_servers = []
+
+ # Need to filter out empty strings or non-truthy values, as an empty server list env var is equal to [''].
+ server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
+ for server_priority, server_key in enumerate(server_list, start=1):
+ # Abuse the 'plugin config' by making 'galaxy_server' a type of plugin
+ # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
+ # section [galaxy_server.<server>] for the values url, username, password, and token.
+ config_dict = dict((k, server_config_def(server_key, k, req, ensure_type)) for k, req, ensure_type in SERVER_DEF)
+ defs = AnsibleLoader(yaml_dump(config_dict)).get_single_data()
+ C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
+
+ # resolve the config created options above with existing config and user options
+ server_options = C.config.get_plugin_options('galaxy_server', server_key)
+
+ # auth_url is used to create the token, but not directly by GalaxyAPI, so
+ # it doesn't need to be passed as a kwarg to GalaxyAPI; the same applies to the others we pop here
+ auth_url = server_options.pop('auth_url')
+ client_id = server_options.pop('client_id')
+ token_val = server_options['token'] or NoTokenSentinel
+ username = server_options['username']
+ v3 = server_options.pop('v3')
+ if server_options['validate_certs'] is None:
+ server_options['validate_certs'] = context.CLIARGS['resolved_validate_certs']
+ validate_certs = server_options['validate_certs']
+
+ if v3:
+ # This allows a user to explicitly indicate the server uses the /v3 API
+ # This was added for testing against pulp_ansible and I'm not sure it has
+ # a practical purpose outside of this use case. As such, this option is not
+ # documented as of now
+ server_options['available_api_versions'] = {'v3': '/v3'}
+
+ # default case if no auth info is provided.
+ server_options['token'] = None
+
+ if username:
+ server_options['token'] = BasicAuthToken(username, server_options['password'])
+ else:
+ if token_val:
+ if auth_url:
+ server_options['token'] = KeycloakToken(access_token=token_val,
+ auth_url=auth_url,
+ validate_certs=validate_certs,
+ client_id=client_id)
+ else:
+ # The galaxy v1 / github / django / 'Token'
+ server_options['token'] = GalaxyToken(token=token_val)
+
+ server_options.update(galaxy_options)
+ config_servers.append(GalaxyAPI(
+ self.galaxy, server_key,
+ priority=server_priority,
+ **server_options
+ ))
+
+ cmd_server = context.CLIARGS['api_server']
+ cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
+
+ validate_certs = context.CLIARGS['resolved_validate_certs']
+ if cmd_server:
+ # Cmd args take precedence over the config entry, but first check if the arg matches a configured server
+ # name and use that config entry; otherwise create a new API entry for the server specified.
+ config_server = next((s for s in config_servers if s.name == cmd_server), None)
+ if config_server:
+ self.api_servers.append(config_server)
+ else:
+ self.api_servers.append(GalaxyAPI(
+ self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
+ priority=len(config_servers) + 1,
+ validate_certs=validate_certs,
+ **galaxy_options
+ ))
+ else:
+ self.api_servers = config_servers
+
+ # Default to C.GALAXY_SERVER if no servers were defined
+ if len(self.api_servers) == 0:
+ self.api_servers.append(GalaxyAPI(
+ self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
+ priority=0,
+ validate_certs=validate_certs,
+ **galaxy_options
+ ))
+
+ # checks api versions once a GalaxyRole makes an api call
+ # self.api can be used to evaluate the best server immediately
+ self.lazy_role_api = RoleDistributionServer(None, self.api_servers)
+
+ return context.CLIARGS['func']()
+
+ @property
+ def api(self):
+ return self.lazy_role_api.api
+
+ def _get_default_collection_path(self):
+ return C.COLLECTIONS_PATHS[0]
+
+ def _parse_requirements_file(self, requirements_file, allow_old_format=True, artifacts_manager=None, validate_signature_options=True):
+ """
+ Parses an Ansible requirements.yml file and returns all the roles and/or collections defined in it. There are two
+ requirements file formats:
+
+ # v1 (roles only)
+ - src: The source of the role, required if include is not set. Can be a Galaxy role name, or a URL to an SCM repo or tarball.
+ name: Downloads the role to the specified name; defaults to the name from Galaxy, or the name of the repo if src is a URL.
+ scm: If src is a URL, specify the SCM. Only git and hg are supported; defaults to git.
+ version: The version of the role to download. Can also be a tag, commit, or branch name; defaults to master.
+ include: Path to additional requirements.yml files.
+
+ # v2 (roles and collections)
+ ---
+ roles:
+ # Same as v1 format just under the roles key
+
+ collections:
+ - namespace.collection
+ - name: namespace.collection
+ version: version identifier, multiple identifiers are separated by ','
+ source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
+ type: git|file|url|galaxy
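+
+ A minimal concrete v2 file (values illustrative):
+
+ collections:
+ - name: community.general
+ version: '>=3.0.0'
+ roles:
+ - src: example.role_name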
+
+ :param requirements_file: The path to the requirements file.
+ :param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
+ :param artifacts_manager: Artifacts manager.
+ :return: a dict containing the roles and collections found in the requirements file.
+ """
+ requirements = {
+ 'roles': [],
+ 'collections': [],
+ }
+
+ b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
+ if not os.path.exists(b_requirements_file):
+ raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
+
+ display.vvv("Reading requirement file at '%s'" % requirements_file)
+ with open(b_requirements_file, 'rb') as req_obj:
+ try:
+ file_requirements = yaml_load(req_obj)
+ except YAMLError as err:
+ raise AnsibleError(
+ "Failed to parse the requirements yml at '%s' with the following error:\n%s"
+ % (to_native(requirements_file), to_native(err)))
+
+ if file_requirements is None:
+ raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))
+
+ def parse_role_req(requirement):
+ if "include" not in requirement:
+ role = RoleRequirement.role_yaml_parse(requirement)
+ display.vvv("found role %s in yaml file" % to_text(role))
+ if "name" not in role and "src" not in role:
+ raise AnsibleError("Must specify name or src for role")
+ return [GalaxyRole(self.galaxy, self.lazy_role_api, **role)]
+ else:
+ b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
+ if not os.path.isfile(b_include_path):
+ raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
+ % (to_native(b_include_path), to_native(requirements_file)))
+
+ with open(b_include_path, 'rb') as f_include:
+ try:
+ return [GalaxyRole(self.galaxy, self.lazy_role_api, **r) for r in
+ (RoleRequirement.role_yaml_parse(i) for i in yaml_load(f_include))]
+ except Exception as e:
+ raise AnsibleError("Unable to load data from include requirements file: %s %s"
+ % (to_native(requirements_file), to_native(e)))
+
+ if isinstance(file_requirements, list):
+ # Older format that contains only roles
+ if not allow_old_format:
+ raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
+ "a list of collections to install")
+
+ for role_req in file_requirements:
+ requirements['roles'] += parse_role_req(role_req)
+
+ else:
+ # Newer format with a collections and/or roles key
+ extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
+ if extra_keys:
+ raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
+ "file. Found: %s" % (to_native(", ".join(extra_keys))))
+
+ for role_req in file_requirements.get('roles') or []:
+ requirements['roles'] += parse_role_req(role_req)
+
+ requirements['collections'] = [
+ Requirement.from_requirement_dict(
+ self._init_coll_req_dict(collection_req),
+ artifacts_manager,
+ validate_signature_options,
+ )
+ for collection_req in file_requirements.get('collections') or []
+ ]
+
+ return requirements
+
+ def _init_coll_req_dict(self, coll_req):
+ if not isinstance(coll_req, dict):
+ # Assume it's a string:
+ return {'name': coll_req}
+
+ if (
+ 'name' not in coll_req or
+ not coll_req.get('source') or
+ coll_req.get('type', 'galaxy') != 'galaxy'
+ ):
+ return coll_req
+
+ # Try and match up the requirement source with our list of Galaxy API
+ # servers defined in the config, otherwise create a server with that
+ # URL without any auth.
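+ # e.g. source: release_galaxy matches a configured server by name, while
+ # source: https://galaxy.example.com/ matches one by URL; anything else gets
+ # a new unauthenticated GalaxyAPI instance (names/URLs illustrative).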
+ coll_req['source'] = next(
+ iter(
+ srvr for srvr in self.api_servers
+ if coll_req['source'] in {srvr.name, srvr.api_server}
+ ),
+ GalaxyAPI(
+ self.galaxy,
+ 'explicit_requirement_{name!s}'.format(
+ name=coll_req['name'],
+ ),
+ coll_req['source'],
+ validate_certs=context.CLIARGS['resolved_validate_certs'],
+ ),
+ )
+
+ return coll_req
+
+ @staticmethod
+ def exit_without_ignore(rc=1):
+ """
+ Aborts with an error, stopping processing, unless the
+ option --ignore-errors was specified
+ """
+ if not context.CLIARGS['ignore_errors']:
+ raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
+
+ @staticmethod
+ def _display_role_info(role_info):
+
+ text = [u"", u"Role: %s" % to_text(role_info['name'])]
+
+ # Get the top-level 'description' first, falling back to role_info['galaxy_info']['description'].
+ galaxy_info = role_info.get('galaxy_info', {})
+ description = role_info.get('description', galaxy_info.get('description', ''))
+ text.append(u"\tdescription: %s" % description)
+
+ for k in sorted(role_info.keys()):
+
+ if k in GalaxyCLI.SKIP_INFO_KEYS:
+ continue
+
+ if isinstance(role_info[k], dict):
+ text.append(u"\t%s:" % (k))
+ for key in sorted(role_info[k].keys()):
+ if key in GalaxyCLI.SKIP_INFO_KEYS:
+ continue
+ text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
+ else:
+ text.append(u"\t%s: %s" % (k, role_info[k]))
+
+ # make sure we have a trailing newline returned
+ text.append(u"")
+ return u'\n'.join(text)
+
+ @staticmethod
+ def _resolve_path(path):
+ return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
+
+ @staticmethod
+ def _get_skeleton_galaxy_yml(template_path, inject_data):
+ with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
+ meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
+
+ galaxy_meta = get_collections_galaxy_meta_info()
+
+ required_config = []
+ optional_config = []
+ for meta_entry in galaxy_meta:
+ config_list = required_config if meta_entry.get('required', False) else optional_config
+
+ value = inject_data.get(meta_entry['key'], None)
+ if not value:
+ meta_type = meta_entry.get('type', 'str')
+
+ if meta_type == 'str':
+ value = ''
+ elif meta_type == 'list':
+ value = []
+ elif meta_type == 'dict':
+ value = {}
+
+ meta_entry['value'] = value
+ config_list.append(meta_entry)
+
+ link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
+ const_pattern = re.compile(r"C\(([^)]+)\)")
+
+ def comment_ify(v):
+ if isinstance(v, list):
+ v = ". ".join([l.rstrip('.') for l in v])
+
+ v = link_pattern.sub(r"\1 <\2>", v)
+ v = const_pattern.sub(r"'\1'", v)
+
+ return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
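+
+ # e.g. comment_ify("See L(the docs, https://docs.ansible.com) for C(name)")
+ # returns "# See the docs <https://docs.ansible.com> for 'name'"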
+
+ loader = DataLoader()
+ templar = Templar(loader, variables={'required_config': required_config, 'optional_config': optional_config})
+ templar.environment.filters['comment_ify'] = comment_ify
+
+ meta_value = templar.template(meta_template)
+
+ return meta_value
+
+ def _require_one_of_collections_requirements(
+ self, collections, requirements_file,
+ signatures=None,
+ artifacts_manager=None,
+ ):
+ if collections and requirements_file:
+ raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
+ elif not collections and not requirements_file:
+ raise AnsibleError("You must specify a collection name or a requirements file.")
+ elif requirements_file:
+ if signatures is not None:
+ raise AnsibleError(
+ "The --signatures option and --requirements-file are mutually exclusive. "
+ "Use the --signatures with positional collection_name args or provide a "
+ "'signatures' key for requirements in the --requirements-file."
+ )
+ requirements_file = GalaxyCLI._resolve_path(requirements_file)
+ requirements = self._parse_requirements_file(
+ requirements_file,
+ allow_old_format=False,
+ artifacts_manager=artifacts_manager,
+ )
+ else:
+ requirements = {
+ 'collections': [
+ Requirement.from_string(coll_input, artifacts_manager, signatures)
+ for coll_input in collections
+ ],
+ 'roles': [],
+ }
+ return requirements
+
+ ############################
+ # execute actions
+ ############################
+
+ def execute_role(self):
+ """
+ Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
+ as listed below.
+ """
+ # To satisfy doc build
+ pass
+
+ def execute_collection(self):
+ """
+ Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
+ listed below.
+ """
+ # To satisfy doc build
+ pass
+
+ def execute_build(self):
+ """
+ Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
+ By default, this command builds from the current working directory. You can optionally pass in the
+ collection input path (where the ``galaxy.yml`` file is).
+ """
+ force = context.CLIARGS['force']
+ output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
+ b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
+
+ if not os.path.exists(b_output_path):
+ os.makedirs(b_output_path)
+ elif os.path.isfile(b_output_path):
+ raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
+
+ for collection_path in context.CLIARGS['args']:
+ collection_path = GalaxyCLI._resolve_path(collection_path)
+ build_collection(
+ to_text(collection_path, errors='surrogate_or_strict'),
+ to_text(output_path, errors='surrogate_or_strict'),
+ force,
+ )
+
+ @with_collection_artifacts_manager
+ def execute_download(self, artifacts_manager=None):
+ collections = context.CLIARGS['args']
+ no_deps = context.CLIARGS['no_deps']
+ download_path = context.CLIARGS['download_path']
+
+ requirements_file = context.CLIARGS['requirements']
+ if requirements_file:
+ requirements_file = GalaxyCLI._resolve_path(requirements_file)
+
+ requirements = self._require_one_of_collections_requirements(
+ collections, requirements_file,
+ artifacts_manager=artifacts_manager,
+ )['collections']
+
+ download_path = GalaxyCLI._resolve_path(download_path)
+ b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
+ if not os.path.exists(b_download_path):
+ os.makedirs(b_download_path)
+
+ download_collections(
+ requirements, download_path, self.api_servers, no_deps,
+ context.CLIARGS['allow_pre_release'],
+ artifacts_manager=artifacts_manager,
+ )
+
+ return 0
+
+ def execute_init(self):
+ """
+ Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
+ Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
+ """
+
+ galaxy_type = context.CLIARGS['type']
+ init_path = context.CLIARGS['init_path']
+ force = context.CLIARGS['force']
+ obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
+
+ obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
+
+ inject_data = dict(
+ description='your {0} description'.format(galaxy_type),
+ ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
+ )
+ if galaxy_type == 'role':
+ inject_data.update(dict(
+ author='your name',
+ company='your company (optional)',
+ license='license (GPL-2.0-or-later, MIT, etc)',
+ role_name=obj_name,
+ role_type=context.CLIARGS['role_type'],
+ issue_tracker_url='http://example.com/issue/tracker',
+ repository_url='http://example.com/repository',
+ documentation_url='http://docs.example.com',
+ homepage_url='http://example.com',
+ min_ansible_version=ansible_version[:3], # x.y
+ dependencies=[],
+ ))
+
+ skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
+ obj_path = os.path.join(init_path, obj_name)
+ elif galaxy_type == 'collection':
+ namespace, collection_name = obj_name.split('.', 1)
+
+ inject_data.update(dict(
+ namespace=namespace,
+ collection_name=collection_name,
+ version='1.0.0',
+ readme='README.md',
+ authors=['your name <example@domain.com>'],
+ license=['GPL-2.0-or-later'],
+ repository='http://example.com/repository',
+ documentation='http://docs.example.com',
+ homepage='http://example.com',
+ issues='http://example.com/issue/tracker',
+ build_ignore=[],
+ ))
+
+ skeleton_ignore_expressions = C.GALAXY_COLLECTION_SKELETON_IGNORE
+ obj_path = os.path.join(init_path, namespace, collection_name)
+
+ b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
+
+ if os.path.exists(b_obj_path):
+ if os.path.isfile(obj_path):
+ raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
+ elif not force:
+ raise AnsibleError("- the directory %s already exists. "
+ "You can use --force to re-initialize this directory,\n"
+ "however it will reset any main.yml files that may have\n"
+ "been modified there already." % to_native(obj_path))
+
+ # delete the contents rather than the collection root in case init was run from the root (--init-path ../../)
+ for root, dirs, files in os.walk(b_obj_path, topdown=True):
+ for old_dir in dirs:
+ path = os.path.join(root, old_dir)
+ shutil.rmtree(path)
+ for old_file in files:
+ path = os.path.join(root, old_file)
+ os.unlink(path)
+
+ if obj_skeleton is not None:
+ own_skeleton = False
+ else:
+ own_skeleton = True
+ obj_skeleton = self.galaxy.default_role_skeleton_path
+ skeleton_ignore_expressions = ['^.*/.git_keep$']
+
+ obj_skeleton = os.path.expanduser(obj_skeleton)
+ skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
+
+ if not os.path.exists(obj_skeleton):
+ raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
+ to_native(obj_skeleton), galaxy_type)
+ )
+
+ loader = DataLoader()
+ templar = Templar(loader, variables=inject_data)
+
+ # create role directory
+ if not os.path.exists(b_obj_path):
+ os.makedirs(b_obj_path)
+
+ for root, dirs, files in os.walk(obj_skeleton, topdown=True):
+ rel_root = os.path.relpath(root, obj_skeleton)
+ rel_dirs = rel_root.split(os.sep)
+ rel_root_dir = rel_dirs[0]
+ if galaxy_type == 'collection':
+ # A collection can contain templates in playbooks/*/templates and roles/*/templates
+ in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
+ else:
+ in_templates_dir = rel_root_dir == 'templates'
+
+ # Filter out ignored directory names
+ # Use [:] to mutate the list os.walk uses
+ dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
+
+ for f in files:
+ filename, ext = os.path.splitext(f)
+
+ if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
+ continue
+
+ if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
+ # Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
+ # dynamically which requires special options to be set.
+
+ # The templated data's keys must match the key name but the inject data contains collection_name
+ # instead of name. We just make a copy and change the key back to name for this file.
+ template_data = inject_data.copy()
+ template_data['name'] = template_data.pop('collection_name')
+
+ meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
+ b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
+ with open(b_dest_file, 'wb') as galaxy_obj:
+ galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
+ elif ext == ".j2" and not in_templates_dir:
+ src_template = os.path.join(root, f)
+ dest_file = os.path.join(obj_path, rel_root, filename)
+ template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
+ b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
+ with open(dest_file, 'wb') as df:
+ df.write(b_rendered)
+ else:
+ f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
+ shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
+
+ for d in dirs:
+ b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
+ if not os.path.exists(b_dir_path):
+ os.makedirs(b_dir_path)
+
+ display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
+
+ def execute_info(self):
+ """
+ Prints out detailed information about an installed role, as well as info available from the Galaxy API.
+ """
+
+ roles_path = context.CLIARGS['roles_path']
+
+ data = ''
+ for role in context.CLIARGS['args']:
+
+ role_info = {'path': roles_path}
+ gr = GalaxyRole(self.galaxy, self.lazy_role_api, role)
+
+ install_info = gr.install_info
+ if install_info:
+ if 'version' in install_info:
+ install_info['installed_version'] = install_info['version']
+ del install_info['version']
+ role_info.update(install_info)
+
+ if not context.CLIARGS['offline']:
+ remote_data = None
+ try:
+ remote_data = self.api.lookup_role_by_name(role, False)
+ except AnsibleError as e:
+ if e.http_code == 400 and 'Bad Request' in e.message:
+ # Role does not exist in Ansible Galaxy
+ data = u"- the role %s was not found" % role
+ break
+
+ raise AnsibleError("Unable to find info about '%s': %s" % (role, e))
+
+ if remote_data:
+ role_info.update(remote_data)
+
+ elif context.CLIARGS['offline'] and not gr._exists:
+ data = u"- the role %s was not found" % role
+ break
+
+ if gr.metadata:
+ role_info.update(gr.metadata)
+
+ req = RoleRequirement()
+ role_spec = req.role_yaml_parse({'role': role})
+ if role_spec:
+ role_info.update(role_spec)
+
+ data += self._display_role_info(role_info)
+
+ self.pager(data)
+
+ @with_collection_artifacts_manager
+ def execute_verify(self, artifacts_manager=None):
+
+ collections = context.CLIARGS['args']
+ search_paths = context.CLIARGS['collections_path']
+ ignore_errors = context.CLIARGS['ignore_errors']
+ local_verify_only = context.CLIARGS['offline']
+ requirements_file = context.CLIARGS['requirements']
+ signatures = context.CLIARGS['signatures']
+ if signatures is not None:
+ signatures = list(signatures)
+
+ requirements = self._require_one_of_collections_requirements(
+ collections, requirements_file,
+ signatures=signatures,
+ artifacts_manager=artifacts_manager,
+ )['collections']
+
+ resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(path)) for path in search_paths]
+
+ results = verify_collections(
+ requirements, resolved_paths,
+ self.api_servers, ignore_errors,
+ local_verify_only=local_verify_only,
+ artifacts_manager=artifacts_manager,
+ )
+
+ if any(result for result in results if not result.success):
+ return 1
+
+ return 0
+
+ @with_collection_artifacts_manager
+ def execute_install(self, artifacts_manager=None):
+ """
+ Install one or more roles (``ansible-galaxy role install``), or one or more collections (``ansible-galaxy collection install``).
+ You can pass in a list (roles or collections) or use the file
+ option listed below (these are mutually exclusive). If you pass in a list, it
+ can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
+
+ :param artifacts_manager: Artifacts manager.
+ """
+ install_items = context.CLIARGS['args']
+ requirements_file = context.CLIARGS['requirements']
+ collection_path = None
+ signatures = context.CLIARGS.get('signatures')
+ if signatures is not None:
+ signatures = list(signatures)
+
+ if requirements_file:
+ requirements_file = GalaxyCLI._resolve_path(requirements_file)
+
+ two_type_warning = "The requirements file '%s' contains {0}s which will be ignored. To install these {0}s " \
+ "run 'ansible-galaxy {0} install -r' or to install both at the same time run " \
+ "'ansible-galaxy install -r' without a custom install path." % to_text(requirements_file)
+
+ # TODO: Would be nice to share the same behaviour with args and -r in collections and roles.
+ collection_requirements = []
+ role_requirements = []
+ if context.CLIARGS['type'] == 'collection':
+ collection_path = GalaxyCLI._resolve_path(context.CLIARGS['collections_path'])
+ requirements = self._require_one_of_collections_requirements(
+ install_items, requirements_file,
+ signatures=signatures,
+ artifacts_manager=artifacts_manager,
+ )
+
+ collection_requirements = requirements['collections']
+ if requirements['roles']:
+ display.vvv(two_type_warning.format('role'))
+ else:
+ if not install_items and requirements_file is None:
+ raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
+
+ if requirements_file:
+ if not (requirements_file.endswith('.yaml') or requirements_file.endswith('.yml')):
+ raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
+
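+                # peek at the raw argv: the implicit 'ansible-galaxy install' form may
+                # also install collections, but only when no roles path override is given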
+ galaxy_args = self._raw_args
+ will_install_collections = self._implicit_role and '-p' not in galaxy_args and '--roles-path' not in galaxy_args
+
+ requirements = self._parse_requirements_file(
+ requirements_file,
+ artifacts_manager=artifacts_manager,
+ validate_signature_options=will_install_collections,
+ )
+ role_requirements = requirements['roles']
+
+ # We can only install collections and roles at the same time if the type wasn't specified and the -p
+ # argument was not used. If collections are present in the requirements then at least display a msg.
+ if requirements['collections'] and (not self._implicit_role or '-p' in galaxy_args or
+ '--roles-path' in galaxy_args):
+
+                    # We only want to display a warning for 'ansible-galaxy install -r ... -p ...'. In other cases the
+                    # user was explicit about the type and shouldn't care that collections were skipped.
+ display_func = display.warning if self._implicit_role else display.vvv
+ display_func(two_type_warning.format('collection'))
+ else:
+ collection_path = self._get_default_collection_path()
+ collection_requirements = requirements['collections']
+ else:
+            # roles were specified directly, so we'll just go out and grab them
+ # (and their dependencies, unless the user doesn't want us to).
+ for rname in context.CLIARGS['args']:
+ role = RoleRequirement.role_yaml_parse(rname.strip())
+ role_requirements.append(GalaxyRole(self.galaxy, self.lazy_role_api, **role))
+
+ if not role_requirements and not collection_requirements:
+ display.display("Skipping install, no requirements found")
+ return
+
+ if role_requirements:
+ display.display("Starting galaxy role install process")
+ self._execute_install_role(role_requirements)
+
+ if collection_requirements:
+ display.display("Starting galaxy collection install process")
+ # Collections can technically be installed even when ansible-galaxy is in role mode so we need to pass in
+ # the install path as context.CLIARGS['collections_path'] won't be set (default is calculated above).
+ self._execute_install_collection(
+ collection_requirements, collection_path,
+ artifacts_manager=artifacts_manager,
+ )
+
+ def _execute_install_collection(
+ self, requirements, path, artifacts_manager,
+ ):
+ force = context.CLIARGS['force']
+ ignore_errors = context.CLIARGS['ignore_errors']
+ no_deps = context.CLIARGS['no_deps']
+ force_with_deps = context.CLIARGS['force_with_deps']
+ try:
+ disable_gpg_verify = context.CLIARGS['disable_gpg_verify']
+ except KeyError:
+ if self._implicit_role:
+ raise AnsibleError(
+ 'Unable to properly parse command line arguments. Please use "ansible-galaxy collection install" '
+ 'instead of "ansible-galaxy install".'
+ )
+ raise
+
+ # If `ansible-galaxy install` is used, collection-only options aren't available to the user and won't be in context.CLIARGS
+ allow_pre_release = context.CLIARGS.get('allow_pre_release', False)
+ upgrade = context.CLIARGS.get('upgrade', False)
+
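+        # Warn when the requested path does not line up with any configured
+        # collections path, since content installed there may not be found by
+        # later Ansible runs.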
+ collections_path = C.COLLECTIONS_PATHS
+ if len([p for p in collections_path if p.startswith(path)]) == 0:
+ display.warning("The specified collections path '%s' is not part of the configured Ansible "
+ "collections paths '%s'. The installed collection will not be picked up in an Ansible "
+ "run, unless within a playbook-adjacent collections directory." % (to_text(path), to_text(":".join(collections_path))))
+
+ output_path = validate_collection_path(path)
+ b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
+ if not os.path.exists(b_output_path):
+ os.makedirs(b_output_path)
+
+ install_collections(
+ requirements, output_path, self.api_servers, ignore_errors,
+ no_deps, force, force_with_deps, upgrade,
+ allow_pre_release=allow_pre_release,
+ artifacts_manager=artifacts_manager,
+ disable_gpg_verify=disable_gpg_verify,
+ offline=context.CLIARGS.get('offline', False),
+ )
+
+ return 0
+
+ def _execute_install_role(self, requirements):
+ role_file = context.CLIARGS['requirements']
+ no_deps = context.CLIARGS['no_deps']
+ force_deps = context.CLIARGS['force_with_deps']
+ force = context.CLIARGS['force'] or force_deps
+
+ for role in requirements:
+            # when a roles file and role name args are both given, only process the matching roles
+ if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
+ display.vvv('Skipping role %s' % role.name)
+ continue
+
+ display.vvv('Processing role %s ' % role.name)
+
+ # query the galaxy API for the role data
+
+ if role.install_info is not None:
+ if role.install_info['version'] != role.version or force:
+ if force:
+ display.display('- changing role %s from %s to %s' %
+ (role.name, role.install_info['version'], role.version or "unspecified"))
+ role.remove()
+ else:
+ display.warning('- %s (%s) is already installed - use --force to change version to %s' %
+ (role.name, role.install_info['version'], role.version or "unspecified"))
+ continue
+ else:
+ if not force:
+ display.display('- %s is already installed, skipping.' % str(role))
+ continue
+
+ try:
+ installed = role.install()
+ except AnsibleError as e:
+ display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
+ self.exit_without_ignore()
+ continue
+
+ # install dependencies, if we want them
+ if not no_deps and installed:
+ if not role.metadata:
+ # NOTE: the meta file is also required for installing the role, not just dependencies
+ display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
+ else:
+ role_dependencies = role.metadata_dependencies + role.requirements
+ for dep in role_dependencies:
+ display.debug('Installing dep %s' % dep)
+ dep_req = RoleRequirement()
+ dep_info = dep_req.role_yaml_parse(dep)
+ dep_role = GalaxyRole(self.galaxy, self.lazy_role_api, **dep_info)
+ if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
+ # we know we can skip this, as it's not going to
+ # be found on galaxy.ansible.com
+ continue
+ if dep_role.install_info is None:
+ if dep_role not in requirements:
+ display.display('- adding dependency: %s' % to_text(dep_role))
+ requirements.append(dep_role)
+ else:
+ display.display('- dependency %s already pending installation.' % dep_role.name)
+ else:
+ if dep_role.install_info['version'] != dep_role.version:
+ if force_deps:
+ display.display('- changing dependent role %s from %s to %s' %
+ (dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
+ dep_role.remove()
+ requirements.append(dep_role)
+ else:
+ display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
+ (to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
+ else:
+ if force_deps:
+ requirements.append(dep_role)
+ else:
+ display.display('- dependency %s is already installed, skipping.' % dep_role.name)
+
+ if not installed:
+ display.warning("- %s was NOT installed successfully." % role.name)
+ self.exit_without_ignore()
+
+ return 0
+
+ def execute_remove(self):
+ """
+ removes the list of roles passed as arguments from the local system.
+ """
+
+ if not context.CLIARGS['args']:
+ raise AnsibleOptionsError('- you must specify at least one role to remove.')
+
+ for role_name in context.CLIARGS['args']:
+ role = GalaxyRole(self.galaxy, self.api, role_name)
+ try:
+ if role.remove():
+ display.display('- successfully removed %s' % role_name)
+ else:
+ display.display('- %s is not installed, skipping.' % role_name)
+ except Exception as e:
+ raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
+
+ return 0
+
+ def execute_list(self):
+ """
+ List installed collections or roles
+ """
+
+ if context.CLIARGS['type'] == 'role':
+ self.execute_list_role()
+ elif context.CLIARGS['type'] == 'collection':
+ self.execute_list_collection()
+
+ def execute_list_role(self):
+ """
+ List all roles installed on the local system or a specific role
+ """
+
+ path_found = False
+ role_found = False
+ warnings = []
+ roles_search_paths = context.CLIARGS['roles_path']
+ role_name = context.CLIARGS['role']
+
+ for path in roles_search_paths:
+ role_path = GalaxyCLI._resolve_path(path)
+ if os.path.isdir(path):
+ path_found = True
+ else:
+ warnings.append("- the configured path {0} does not exist.".format(path))
+ continue
+
+ if role_name:
+ # show the requested role, if it exists
+ gr = GalaxyRole(self.galaxy, self.lazy_role_api, role_name, path=os.path.join(role_path, role_name))
+ if os.path.isdir(gr.path):
+ role_found = True
+ display.display('# %s' % os.path.dirname(gr.path))
+ _display_role(gr)
+ break
+ warnings.append("- the role %s was not found" % role_name)
+ else:
+ if not os.path.exists(role_path):
+ warnings.append("- the configured path %s does not exist." % role_path)
+ continue
+
+ if not os.path.isdir(role_path):
+                    warnings.append("- the configured path %s exists, but it is not a directory." % role_path)
+ continue
+
+ display.display('# %s' % role_path)
+ path_files = os.listdir(role_path)
+ for path_file in path_files:
+ gr = GalaxyRole(self.galaxy, self.lazy_role_api, path_file, path=path)
+ if gr.metadata:
+ _display_role(gr)
+
+ # Do not warn if the role was found in any of the search paths
+ if role_found and role_name:
+ warnings = []
+
+ for w in warnings:
+ display.warning(w)
+
+ if not path_found:
+ raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
+
+ return 0
+
+ @with_collection_artifacts_manager
+ def execute_list_collection(self, artifacts_manager=None):
+ """
+ List all collections installed on the local system
+
+ :param artifacts_manager: Artifacts manager.
+ """
+ if artifacts_manager is not None:
+ artifacts_manager.require_build_metadata = False
+
+ output_format = context.CLIARGS['output_format']
+ collections_search_paths = set(context.CLIARGS['collections_path'])
+ collection_name = context.CLIARGS['collection']
+ default_collections_path = AnsibleCollectionConfig.collection_paths
+ collections_in_paths = {}
+
+ warnings = []
+ path_found = False
+ collection_found = False
+ for path in collections_search_paths:
+ collection_path = GalaxyCLI._resolve_path(path)
+ if not os.path.exists(path):
+ if path in default_collections_path:
+ # don't warn for missing default paths
+ continue
+ warnings.append("- the configured path {0} does not exist.".format(collection_path))
+ continue
+
+ if not os.path.isdir(collection_path):
+                warnings.append("- the configured path {0} exists, but it is not a directory.".format(collection_path))
+ continue
+
+ path_found = True
+
+ if collection_name:
+ # list a specific collection
+
+ validate_collection_name(collection_name)
+ namespace, collection = collection_name.split('.')
+
+ collection_path = validate_collection_path(collection_path)
+ b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')
+
+ if not os.path.exists(b_collection_path):
+ warnings.append("- unable to find {0} in collection paths".format(collection_name))
+ continue
+
+ if not os.path.isdir(collection_path):
+                    warnings.append("- the configured path {0} exists, but it is not a directory.".format(collection_path))
+ continue
+
+ collection_found = True
+
+ try:
+ collection = Requirement.from_dir_path_as_unknown(
+ b_collection_path,
+ artifacts_manager,
+ )
+ except ValueError as val_err:
+ six.raise_from(AnsibleError(val_err), val_err)
+
+ if output_format in {'yaml', 'json'}:
+ collections_in_paths[collection_path] = {
+ collection.fqcn: {'version': collection.ver}
+ }
+
+ continue
+
+ fqcn_width, version_width = _get_collection_widths([collection])
+
+ _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
+ _display_collection(collection, fqcn_width, version_width)
+
+ else:
+ # list all collections
+ collection_path = validate_collection_path(path)
+ if os.path.isdir(collection_path):
+ display.vvv("Searching {0} for collections".format(collection_path))
+ collections = list(find_existing_collections(
+ collection_path, artifacts_manager,
+ ))
+ else:
+ # There was no 'ansible_collections/' directory in the path, so there
+                    # are no collections here.
+ display.vvv("No 'ansible_collections' directory found at {0}".format(collection_path))
+ continue
+
+ if not collections:
+ display.vvv("No collections found at {0}".format(collection_path))
+ continue
+
+ if output_format in {'yaml', 'json'}:
+ collections_in_paths[collection_path] = {
+ collection.fqcn: {'version': collection.ver} for collection in collections
+ }
+
+ continue
+
+ # Display header
+ fqcn_width, version_width = _get_collection_widths(collections)
+ _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
+
+ # Sort collections by the namespace and name
+ for collection in sorted(collections, key=to_text):
+ _display_collection(collection, fqcn_width, version_width)
+
+ # Do not warn if the specific collection was found in any of the search paths
+ if collection_found and collection_name:
+ warnings = []
+
+ for w in warnings:
+ display.warning(w)
+
+ if not path_found:
+ raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
+
+ if output_format == 'json':
+ display.display(json.dumps(collections_in_paths))
+ elif output_format == 'yaml':
+ display.display(yaml_dump(collections_in_paths))
+
+ return 0
+
+ def execute_publish(self):
+ """
+ Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
+ """
+ collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
+ wait = context.CLIARGS['wait']
+ timeout = context.CLIARGS['import_timeout']
+
+ publish_collection(collection_path, self.api, wait, timeout)
+
+ def execute_search(self):
+ ''' searches for roles on the Ansible Galaxy server'''
+ page_size = 1000
+ search = None
+
+ if context.CLIARGS['args']:
+ search = '+'.join(context.CLIARGS['args'])
+
+ if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
+ raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
+
+ response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
+ tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
+
+ if response['count'] == 0:
+ display.display("No roles match your search.", color=C.COLOR_ERROR)
+ return 1
+
+ data = [u'']
+
+ if response['count'] > page_size:
+ data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
+ else:
+ data.append(u"Found %d roles matching your search:" % response['count'])
+
+        name_len = max(len(role['username'] + '.' + role['name']) for role in response['results'])
+ format_str = u" %%-%ds %%s" % name_len
+ data.append(u'')
+ data.append(format_str % (u"Name", u"Description"))
+ data.append(format_str % (u"----", u"-----------"))
+ for role in response['results']:
+ data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
+
+ data = u'\n'.join(data)
+ self.pager(data)
+
+ return 0
+
+ def execute_import(self):
+ """ used to import a role into Ansible Galaxy """
+
+ colors = {
+ 'INFO': 'normal',
+ 'WARNING': C.COLOR_WARN,
+ 'ERROR': C.COLOR_ERROR,
+ 'SUCCESS': C.COLOR_OK,
+ 'FAILED': C.COLOR_ERROR,
+ }
+
+ github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
+ github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
+
+ if context.CLIARGS['check_status']:
+ task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
+ else:
+ # Submit an import request
+ task = self.api.create_import_task(github_user, github_repo,
+ reference=context.CLIARGS['reference'],
+ role_name=context.CLIARGS['role_name'])
+
+ if len(task) > 1:
+ # found multiple roles associated with github_user/github_repo
+ display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
+ color='yellow')
+ display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
+ for t in task:
+ display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
+ display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
+ color=C.COLOR_CHANGED)
+ return 0
+ # found a single role as expected
+ display.display("Successfully submitted import request %d" % task[0]['id'])
+ if not context.CLIARGS['wait']:
+ display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
+ display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
+
+ if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
+ # Get the status of the import
+ msg_list = []
+ finished = False
+ while not finished:
+ task = self.api.get_import_task(task_id=task[0]['id'])
+ for msg in task[0]['summary_fields']['task_messages']:
+ if msg['id'] not in msg_list:
+ display.display(msg['message_text'], color=colors[msg['message_type']])
+ msg_list.append(msg['id'])
+ if task[0]['state'] in ['SUCCESS', 'FAILED']:
+ finished = True
+ else:
+ time.sleep(10)
+
+ return 0
+
+ def execute_setup(self):
+ """ Setup an integration from Github or Travis for Ansible Galaxy roles"""
+
+ if context.CLIARGS['setup_list']:
+ # List existing integration secrets
+ secrets = self.api.list_secrets()
+ if len(secrets) == 0:
+ # None found
+ display.display("No integrations found.")
+ return 0
+ display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
+ display.display("---------- ---------- ----------", color=C.COLOR_OK)
+ for secret in secrets:
+ display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
+ secret['github_repo']), color=C.COLOR_OK)
+ return 0
+
+ if context.CLIARGS['remove_id']:
+ # Remove a secret
+ self.api.remove_secret(context.CLIARGS['remove_id'])
+ display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
+ return 0
+
+ source = context.CLIARGS['source']
+ github_user = context.CLIARGS['github_user']
+ github_repo = context.CLIARGS['github_repo']
+ secret = context.CLIARGS['secret']
+
+ resp = self.api.add_secret(source, github_user, github_repo, secret)
+ display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
+
+ return 0
+
+ def execute_delete(self):
+ """ Delete a role from Ansible Galaxy. """
+
+ github_user = context.CLIARGS['github_user']
+ github_repo = context.CLIARGS['github_repo']
+ resp = self.api.delete_role(github_user, github_repo)
+
+ if len(resp['deleted_roles']) > 1:
+ display.display("Deleted the following roles:")
+ display.display("ID User Name")
+ display.display("------ --------------- ----------")
+ for role in resp['deleted_roles']:
+ display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
+
+ display.display(resp['status'])
+
+ return 0
+
+
+def main(args=None):
+ GalaxyCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/cli/inventory.py b/lib/ansible/cli/inventory.py
new file mode 100755
index 0000000..e8ed75e
--- /dev/null
+++ b/lib/ansible/cli/inventory.py
@@ -0,0 +1,417 @@
+#!/usr/bin/env python
+# Copyright: (c) 2017, Brian Coca <bcoca@ansible.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import sys
+
+import argparse
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.utils.vars import combine_vars
+from ansible.utils.display import Display
+from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
+
+display = Display()
+
+INTERNAL_VARS = frozenset(['ansible_diff_mode',
+ 'ansible_config_file',
+ 'ansible_facts',
+ 'ansible_forks',
+ 'ansible_inventory_sources',
+ 'ansible_limit',
+ 'ansible_playbook_python',
+ 'ansible_run_tags',
+ 'ansible_skip_tags',
+ 'ansible_verbosity',
+ 'ansible_version',
+ 'inventory_dir',
+ 'inventory_file',
+ 'inventory_hostname',
+ 'inventory_hostname_short',
+ 'groups',
+ 'group_names',
+ 'omit',
+ 'playbook_dir', ])
+
+
+class InventoryCLI(CLI):
+ ''' used to display or dump the configured inventory as Ansible sees it '''
+
+ name = 'ansible-inventory'
+
+ ARGUMENTS = {'host': 'The name of a host to match in the inventory, relevant when using --list',
+ 'group': 'The name of a group in the inventory, relevant when using --graph', }
+
+ def __init__(self, args):
+
+ super(InventoryCLI, self).__init__(args)
+ self.vm = None
+ self.loader = None
+ self.inventory = None
+
+ def init_parser(self):
+ super(InventoryCLI, self).init_parser(
+ usage='usage: %prog [options] [host|group]',
+ epilog='Show Ansible inventory information, by default it uses the inventory script JSON format')
+
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+
+ # remove unused default options
+ self.parser.add_argument('-l', '--limit', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument, nargs='?')
+ self.parser.add_argument('--list-hosts', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument)
+
+ self.parser.add_argument('args', metavar='host|group', nargs='?')
+
+ # Actions
+        action_group = self.parser.add_argument_group("Actions", "One of the following must be used on invocation, ONLY ONE!")
+ action_group.add_argument("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script')
+ action_group.add_argument("--host", action="store", default=None, dest='host', help='Output specific host info, works as inventory script')
+ action_group.add_argument("--graph", action="store_true", default=False, dest='graph',
+ help='create inventory graph, if supplying pattern it must be a valid group name')
+ self.parser.add_argument_group(action_group)
+
+ # graph
+ self.parser.add_argument("-y", "--yaml", action="store_true", default=False, dest='yaml',
+ help='Use YAML format instead of default JSON, ignored for --graph')
+ self.parser.add_argument('--toml', action='store_true', default=False, dest='toml',
+ help='Use TOML format instead of default JSON, ignored for --graph')
+ self.parser.add_argument("--vars", action="store_true", default=False, dest='show_vars',
+ help='Add vars to graph display, ignored unless used with --graph')
+
+ # list
+ self.parser.add_argument("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export',
+ help="When doing an --list, represent in a way that is optimized for export,"
+ "not as an accurate representation of how Ansible has processed it")
+ self.parser.add_argument('--output', default=None, dest='output_file',
+ help="When doing --list, send the inventory to a file instead of to the screen")
+ # self.parser.add_argument("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins',
+ # help="When doing an --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")
+
+ def post_process_args(self, options):
+ options = super(InventoryCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options)
+
+ # there can be only one! and, at least, one!
+ used = 0
+ for opt in (options.list, options.host, options.graph):
+ if opt:
+ used += 1
+ if used == 0:
+ raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
+ elif used > 1:
+ raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")
+
+ # set host pattern to default if not supplied
+ if options.args:
+ options.pattern = options.args
+ else:
+ options.pattern = 'all'
+
+ return options
+
+ def run(self):
+
+ super(InventoryCLI, self).run()
+
+ # Initialize needed objects
+ self.loader, self.inventory, self.vm = self._play_prereqs()
+
+ results = None
+ if context.CLIARGS['host']:
+ hosts = self.inventory.get_hosts(context.CLIARGS['host'])
+ if len(hosts) != 1:
+ raise AnsibleOptionsError("You must pass a single valid host to --host parameter")
+
+ myvars = self._get_host_variables(host=hosts[0])
+
+ # FIXME: should we template first?
+ results = self.dump(myvars)
+
+ elif context.CLIARGS['graph']:
+ results = self.inventory_graph()
+ elif context.CLIARGS['list']:
+ top = self._get_group('all')
+ if context.CLIARGS['yaml']:
+ results = self.yaml_inventory(top)
+ elif context.CLIARGS['toml']:
+ results = self.toml_inventory(top)
+ else:
+ results = self.json_inventory(top)
+ results = self.dump(results)
+
+ if results:
+ outfile = context.CLIARGS['output_file']
+ if outfile is None:
+ # FIXME: pager?
+ display.display(results)
+ else:
+ try:
+ with open(to_bytes(outfile), 'wb') as f:
+ f.write(to_bytes(results))
+ except (OSError, IOError) as e:
+ raise AnsibleError('Unable to write to destination file (%s): %s' % (to_native(outfile), to_native(e)))
+ sys.exit(0)
+
+ sys.exit(1)
+
+ @staticmethod
+ def dump(stuff):
+
+ if context.CLIARGS['yaml']:
+ import yaml
+ from ansible.parsing.yaml.dumper import AnsibleDumper
+ results = to_text(yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False, allow_unicode=True))
+ elif context.CLIARGS['toml']:
+ from ansible.plugins.inventory.toml import toml_dumps
+ try:
+ results = toml_dumps(stuff)
+ except TypeError as e:
+ raise AnsibleError(
+ 'The source inventory contains a value that cannot be represented in TOML: %s' % e
+ )
+ except KeyError as e:
+ raise AnsibleError(
+ 'The source inventory contains a non-string key (%s) which cannot be represented in TOML. '
+ 'The specified key will need to be converted to a string. Be aware that if your playbooks '
+ 'expect this key to be non-string, your playbooks will need to be modified to support this '
+ 'change.' % e.args[0]
+ )
+ else:
+ import json
+ from ansible.parsing.ajson import AnsibleJSONEncoder
+ try:
+ results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=True, indent=4, preprocess_unsafe=True, ensure_ascii=False)
+ except TypeError as e:
+ results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=False, indent=4, preprocess_unsafe=True, ensure_ascii=False)
+ display.warning("Could not sort JSON output due to issues while sorting keys: %s" % to_native(e))
+
+ return results
+
+ def _get_group_variables(self, group):
+
+ # get info from inventory source
+ res = group.get_vars()
+
+ # Always load vars plugins
+ res = combine_vars(res, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [group], 'all'))
+ if context.CLIARGS['basedir']:
+ res = combine_vars(res, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [group], 'all'))
+
+ if group.priority != 1:
+ res['ansible_group_priority'] = group.priority
+
+ return self._remove_internal(res)
+
+ def _get_host_variables(self, host):
+
+ if context.CLIARGS['export']:
+            # only get vars defined directly on the host
+ hostvars = host.get_vars()
+
+ # Always load vars plugins
+ hostvars = combine_vars(hostvars, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [host], 'all'))
+ if context.CLIARGS['basedir']:
+ hostvars = combine_vars(hostvars, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [host], 'all'))
+ else:
+ # get all vars flattened by host, but skip magic hostvars
+ hostvars = self.vm.get_vars(host=host, include_hostvars=False, stage='all')
+
+ return self._remove_internal(hostvars)
+
+ def _get_group(self, gname):
+ group = self.inventory.groups.get(gname)
+ return group
+
+ @staticmethod
+ def _remove_internal(dump):
+
+ for internal in INTERNAL_VARS:
+ if internal in dump:
+ del dump[internal]
+
+ return dump
+
+ @staticmethod
+ def _remove_empty(dump):
+ # remove empty keys
+ for x in ('hosts', 'vars', 'children'):
+ if x in dump and not dump[x]:
+ del dump[x]
+
+ @staticmethod
+ def _show_vars(dump, depth):
+ result = []
+ for (name, val) in sorted(dump.items()):
+ result.append(InventoryCLI._graph_name('{%s = %s}' % (name, val), depth))
+ return result
+
+ @staticmethod
+ def _graph_name(name, depth=0):
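+        # e.g. _graph_name('@web:', 1) -> ' |--@web:' and depth 2 -> ' | |--@web:'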
+ if depth:
+ name = " |" * (depth) + "--%s" % name
+ return name
+
+ def _graph_group(self, group, depth=0):
+
+ result = [self._graph_name('@%s:' % group.name, depth)]
+ depth = depth + 1
+ for kid in group.child_groups:
+ result.extend(self._graph_group(kid, depth))
+
+ if group.name != 'all':
+ for host in group.hosts:
+ result.append(self._graph_name(host.name, depth))
+ if context.CLIARGS['show_vars']:
+ result.extend(self._show_vars(self._get_host_variables(host), depth + 1))
+
+ if context.CLIARGS['show_vars']:
+ result.extend(self._show_vars(self._get_group_variables(group), depth))
+
+ return result
+
+ def inventory_graph(self):
+
+ start_at = self._get_group(context.CLIARGS['pattern'])
+ if start_at:
+ return '\n'.join(self._graph_group(start_at))
+ else:
+ raise AnsibleOptionsError("Pattern must be valid group name when using --graph")
+
+ def json_inventory(self, top):
+
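+        # The result follows the dynamic-inventory script contract: one key per
+        # group plus a '_meta' key carrying all hostvars. Illustrative shape:
+        #   {"all": {"children": ["ungrouped", "web"]},
+        #    "web": {"hosts": ["h1"]},
+        #    "_meta": {"hostvars": {"h1": {...}}}}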
+ seen = set()
+
+ def format_group(group):
+ results = {}
+ results[group.name] = {}
+ if group.name != 'all':
+ results[group.name]['hosts'] = [h.name for h in group.hosts]
+ results[group.name]['children'] = []
+ for subgroup in group.child_groups:
+ results[group.name]['children'].append(subgroup.name)
+ if subgroup.name not in seen:
+ results.update(format_group(subgroup))
+ seen.add(subgroup.name)
+ if context.CLIARGS['export']:
+ results[group.name]['vars'] = self._get_group_variables(group)
+
+ self._remove_empty(results[group.name])
+ if not results[group.name]:
+ del results[group.name]
+
+ return results
+
+ results = format_group(top)
+
+ # populate meta
+ results['_meta'] = {'hostvars': {}}
+ hosts = self.inventory.get_hosts()
+ for host in hosts:
+ hvars = self._get_host_variables(host)
+ if hvars:
+ results['_meta']['hostvars'][host.name] = hvars
+
+ return results
+
+ def yaml_inventory(self, top):
+
+ seen = []
+
+ def format_group(group):
+ results = {}
+
+ # initialize group + vars
+ results[group.name] = {}
+
+ # subgroups
+ results[group.name]['children'] = {}
+ for subgroup in group.child_groups:
+ if subgroup.name != 'all':
+ results[group.name]['children'].update(format_group(subgroup))
+
+ # hosts for group
+ results[group.name]['hosts'] = {}
+ if group.name != 'all':
+ for h in group.hosts:
+ myvars = {}
+ if h.name not in seen: # avoid defining host vars more than once
+ seen.append(h.name)
+ myvars = self._get_host_variables(host=h)
+ results[group.name]['hosts'][h.name] = myvars
+
+ if context.CLIARGS['export']:
+ gvars = self._get_group_variables(group)
+ if gvars:
+ results[group.name]['vars'] = gvars
+
+ self._remove_empty(results[group.name])
+
+ return results
+
+ return format_group(top)
+
+ def toml_inventory(self, top):
+ seen = set()
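+        # the 'ungrouped' child group is skipped below when it has no hosts, as an
+        # empty group adds nothing useful to the TOML output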
+ has_ungrouped = bool(next(g.hosts for g in top.child_groups if g.name == 'ungrouped'))
+
+ def format_group(group):
+ results = {}
+ results[group.name] = {}
+
+ results[group.name]['children'] = []
+ for subgroup in group.child_groups:
+ if subgroup.name == 'ungrouped' and not has_ungrouped:
+ continue
+ if group.name != 'all':
+ results[group.name]['children'].append(subgroup.name)
+ results.update(format_group(subgroup))
+
+ if group.name != 'all':
+ for host in group.hosts:
+ if host.name not in seen:
+ seen.add(host.name)
+ host_vars = self._get_host_variables(host=host)
+ else:
+ host_vars = {}
+ try:
+ results[group.name]['hosts'][host.name] = host_vars
+ except KeyError:
+ results[group.name]['hosts'] = {host.name: host_vars}
+
+ if context.CLIARGS['export']:
+ results[group.name]['vars'] = self._get_group_variables(group)
+
+ self._remove_empty(results[group.name])
+ if not results[group.name]:
+ del results[group.name]
+
+ return results
+
+ results = format_group(top)
+
+ return results
+
+
+def main(args=None):
+ InventoryCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py
new file mode 100755
index 0000000..c94cf0f
--- /dev/null
+++ b/lib/ansible/cli/playbook.py
@@ -0,0 +1,231 @@
+#!/usr/bin/env python
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import os
+import stat
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError
+from ansible.executor.playbook_executor import PlaybookExecutor
+from ansible.module_utils._text import to_bytes
+from ansible.playbook.block import Block
+from ansible.plugins.loader import add_all_plugin_dirs
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
+from ansible.utils.display import Display
+
+
+display = Display()
+
+
+class PlaybookCLI(CLI):
+ ''' the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system.
+ See the project home page (https://docs.ansible.com) for more information. '''
+
+ name = 'ansible-playbook'
+
+ def init_parser(self):
+
+ # create parser for CLI options
+ super(PlaybookCLI, self).init_parser(
+ usage="%prog [options] playbook.yml [playbook2 ...]",
+ desc="Runs Ansible playbooks, executing the defined tasks on the targeted hosts.")
+
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_meta_options(self.parser)
+ opt_help.add_runas_options(self.parser)
+ opt_help.add_subset_options(self.parser)
+ opt_help.add_check_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_fork_options(self.parser)
+ opt_help.add_module_options(self.parser)
+
+ # ansible playbook specific opts
+ self.parser.add_argument('--list-tasks', dest='listtasks', action='store_true',
+ help="list all tasks that would be executed")
+ self.parser.add_argument('--list-tags', dest='listtags', action='store_true',
+ help="list all available tags")
+ self.parser.add_argument('--step', dest='step', action='store_true',
+ help="one-step-at-a-time: confirm each task before running")
+ self.parser.add_argument('--start-at-task', dest='start_at_task',
+ help="start the playbook at the task matching this name")
+ self.parser.add_argument('args', help='Playbook(s)', metavar='playbook', nargs='+')
+
+ def post_process_args(self, options):
+ options = super(PlaybookCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options, runas_opts=True, fork_opts=True)
+
+ return options
+
+ def run(self):
+
+ super(PlaybookCLI, self).run()
+
+ # Note: slightly wrong, this is written so that implicit localhost
+ # manages passwords
+ sshpass = None
+ becomepass = None
+ passwords = {}
+
+ # initial error check, to make sure all specified playbooks are accessible
+ # before we start running anything through the playbook executor
+ # also prep plugin paths
+ b_playbook_dirs = []
+ for playbook in context.CLIARGS['args']:
+
+            # resolve the playbook if it is a collection playbook in FQCN notation; otherwise leave it unchanged
+ resource = _get_collection_playbook_path(playbook)
+ if resource is not None:
+ playbook_collection = resource[2]
+ else:
+ # not an FQCN so must be a file
+ if not os.path.exists(playbook):
+ raise AnsibleError("the playbook: %s could not be found" % playbook)
+ if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
+ raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
+
+ # check if playbook is from collection (path can be passed directly)
+ playbook_collection = _get_collection_name_from_path(playbook)
+
+ # don't add collection playbooks to adjacency search path
+ if not playbook_collection:
+ # setup dirs to enable loading plugins from all playbooks in case they add callbacks/inventory/etc
+ b_playbook_dir = os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict')))
+ add_all_plugin_dirs(b_playbook_dir)
+ b_playbook_dirs.append(b_playbook_dir)
+
+ if b_playbook_dirs:
+ # allow collections adjacent to these playbooks
+ # we use list copy to avoid opening up 'adjacency' in the previous loop
+ AnsibleCollectionConfig.playbook_paths = b_playbook_dirs
+
+ # don't deal with privilege escalation or passwords when we don't need to
+ if not (context.CLIARGS['listhosts'] or context.CLIARGS['listtasks'] or
+ context.CLIARGS['listtags'] or context.CLIARGS['syntax']):
+ (sshpass, becomepass) = self.ask_passwords()
+ passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
+
+ # create base objects
+ loader, inventory, variable_manager = self._play_prereqs()
+
+        # The implicit localhost (which is not returned in list_hosts()) is taken into account for
+ # warning if inventory is empty. But it can't be taken into account for
+ # checking if limit doesn't match any hosts. Instead we don't worry about
+ # limit if only implicit localhost was in inventory to start with.
+ #
+ # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
+ CLI.get_host_list(inventory, context.CLIARGS['subset'])
+
+ # flush fact cache if requested
+ if context.CLIARGS['flush_cache']:
+ self._flush_cache(inventory, variable_manager)
+
+ # create the playbook executor, which manages running the plays via a task queue manager
+ pbex = PlaybookExecutor(playbooks=context.CLIARGS['args'], inventory=inventory,
+ variable_manager=variable_manager, loader=loader,
+ passwords=passwords)
+
+ results = pbex.run()
+
+ if isinstance(results, list):
+ for p in results:
+
+ display.display('\nplaybook: %s' % p['playbook'])
+ for idx, play in enumerate(p['plays']):
+ if play._included_path is not None:
+ loader.set_basedir(play._included_path)
+ else:
+ pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
+ loader.set_basedir(pb_dir)
+
+ # show host list if we were able to template into a list
+ try:
+ host_list = ','.join(play.hosts)
+ except TypeError:
+ host_list = ''
+
+ msg = "\n play #%d (%s): %s" % (idx + 1, host_list, play.name)
+ mytags = set(play.tags)
+ msg += '\tTAGS: [%s]' % (','.join(mytags))
+
+ if context.CLIARGS['listhosts']:
+ playhosts = set(inventory.get_hosts(play.hosts))
+ msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
+ for host in playhosts:
+ msg += "\n %s" % host
+
+ display.display(msg)
+
+ all_tags = set()
+ if context.CLIARGS['listtags'] or context.CLIARGS['listtasks']:
+ taskmsg = ''
+ if context.CLIARGS['listtasks']:
+ taskmsg = ' tasks:\n'
+
+ def _process_block(b):
+ taskmsg = ''
+ for task in b.block:
+ if isinstance(task, Block):
+ taskmsg += _process_block(task)
+ else:
+ if task.action in C._ACTION_META and task.implicit:
+ continue
+
+ all_tags.update(task.tags)
+ if context.CLIARGS['listtasks']:
+ cur_tags = list(mytags.union(set(task.tags)))
+ cur_tags.sort()
+ if task.name:
+ taskmsg += " %s" % task.get_name()
+ else:
+ taskmsg += " %s" % task.action
+ taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
+
+ return taskmsg
+
+ all_vars = variable_manager.get_vars(play=play)
+ for block in play.compile():
+ block = block.filter_tagged_tasks(all_vars)
+ if not block.has_tasks():
+ continue
+ taskmsg += _process_block(block)
+
+ if context.CLIARGS['listtags']:
+ cur_tags = list(mytags.union(all_tags))
+ cur_tags.sort()
+ taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)
+
+ display.display(taskmsg)
+
+ return 0
+ else:
+ return results
+
+ @staticmethod
+ def _flush_cache(inventory, variable_manager):
+ for host in inventory.list_hosts():
+ hostname = host.get_name()
+ variable_manager.clear_facts(hostname)
+
+
+def main(args=None):
+ PlaybookCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py
new file mode 100755
index 0000000..dc8f055
--- /dev/null
+++ b/lib/ansible/cli/pull.py
@@ -0,0 +1,364 @@
+#!/usr/bin/env python
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import datetime
+import os
+import platform
+import random
+import shlex
+import shutil
+import socket
+import sys
+import time
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleOptionsError
+from ansible.module_utils._text import to_native, to_text
+from ansible.plugins.loader import module_loader
+from ansible.utils.cmd_functions import run_cmd
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class PullCLI(CLI):
+ ''' Used to pull a remote copy of ansible on each managed node,
+ each set to run via cron and update playbook source via a source repository.
+ This inverts the default *push* architecture of ansible into a *pull* architecture,
+ which has near-limitless scaling potential.
+
+ None of the CLI tools are designed to run concurrently with themselves,
+ you should use an external scheduler and/or locking to ensure there are no clashing operations.
+
+ The setup playbook can be tuned to change the cron frequency, logging locations, and parameters to ansible-pull.
+ This is useful both for extreme scale-out as well as periodic remediation.
+ Usage of the 'fetch' module to retrieve logs from ansible-pull runs would be an
+    excellent way to gather and analyze remote logs.
+ '''
+
+ name = 'ansible-pull'
+
+ DEFAULT_REPO_TYPE = 'git'
+ DEFAULT_PLAYBOOK = 'local.yml'
+ REPO_CHOICES = ('git', 'subversion', 'hg', 'bzr')
+ PLAYBOOK_ERRORS = {
+ 1: 'File does not exist',
+ 2: 'File is not readable',
+ }
+    ARGUMENTS = {'playbook.yml': 'The name of one of the YAML format files to run as an Ansible playbook. '
+                 'This can be a relative path within the checkout. By default, Ansible will '
+                 "look for a playbook based on the host's fully-qualified domain name, "
+                 'then on the hostname, and finally a playbook named *local.yml*.', }
+
+ SKIP_INVENTORY_DEFAULTS = True
+
+ @staticmethod
+ def _get_inv_cli():
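+        # Rebuild '-i' options from CLIARGS. Illustrative results: a source list
+        # ['h1', 'h2'] yields " -i 'h1,h2' "; an inventory file 'hosts.ini' that
+        # exists yields ' -i hosts.ini '.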
+ inv_opts = ''
+ if context.CLIARGS.get('inventory', False):
+ for inv in context.CLIARGS['inventory']:
+ if isinstance(inv, list):
+ inv_opts += " -i '%s' " % ','.join(inv)
+ elif ',' in inv or os.path.exists(inv):
+ inv_opts += ' -i %s ' % inv
+
+ return inv_opts
+
+ def init_parser(self):
+ ''' create an options parser for bin/ansible '''
+
+ super(PullCLI, self).init_parser(
+ usage='%prog -U <repository> [options] [<playbook.yml>]',
+ desc="pulls playbooks from a VCS repo and executes them for the local host")
+
+ # Do not add check_options as there's a conflict with --checkout/-C
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_subset_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_module_options(self.parser)
+ opt_help.add_runas_prompt_options(self.parser)
+
+ self.parser.add_argument('args', help='Playbook(s)', metavar='playbook.yml', nargs='*')
+
+ # options unique to pull
+ self.parser.add_argument('--purge', default=False, action='store_true', help='purge checkout after playbook run')
+ self.parser.add_argument('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
+ help='only run the playbook if the repository has been updated')
+ self.parser.add_argument('-s', '--sleep', dest='sleep', default=None,
+                                 help='sleep for a random interval (between 0 and n seconds) before starting. '
+ 'This is a useful way to disperse git requests')
+ self.parser.add_argument('-f', '--force', dest='force', default=False, action='store_true',
+ help='run the playbook even if the repository could not be updated')
+ self.parser.add_argument('-d', '--directory', dest='dest', default=None,
+ help='absolute path of repository checkout directory (relative paths are not supported)')
+ self.parser.add_argument('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
+ self.parser.add_argument('--full', dest='fullclone', action='store_true', help='Do a full clone, instead of a shallow one.')
+ self.parser.add_argument('-C', '--checkout', dest='checkout',
+ help='branch/tag/commit to checkout. Defaults to behavior of repository module.')
+ self.parser.add_argument('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
+ help='adds the hostkey for the repo url if not already added')
+ self.parser.add_argument('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
+ help='Repository module name, which ansible will use to check out the repo. Choices are %s. Default is %s.'
+ % (self.REPO_CHOICES, self.DEFAULT_REPO_TYPE))
+ self.parser.add_argument('--verify-commit', dest='verify', default=False, action='store_true',
+ help='verify GPG signature of checked out commit, if it fails abort running the playbook. '
+ 'This needs the corresponding VCS module to support such an operation')
+ self.parser.add_argument('--clean', dest='clean', default=False, action='store_true',
+ help='modified files in the working repository will be discarded')
+ self.parser.add_argument('--track-subs', dest='tracksubs', default=False, action='store_true',
+ help='submodules will track the latest changes. This is equivalent to specifying the --remote flag to git submodule update')
+ # add a subset of the check_opts flag group manually, as the full set's
+ # shortcodes conflict with above --checkout/-C
+ self.parser.add_argument("--check", default=False, dest='check', action='store_true',
+ help="don't make any changes; instead, try to predict some of the changes that may occur")
+ self.parser.add_argument("--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
+ help="when changing (small) files and templates, show the differences in those files; works great with --check")
+
+ def post_process_args(self, options):
+ options = super(PullCLI, self).post_process_args(options)
+
+ if not options.dest:
+ hostname = socket.getfqdn()
+            # use a hostname-dependent directory, in case $HOME is on NFS
+ options.dest = os.path.join(C.ANSIBLE_HOME, 'pull', hostname)
+ options.dest = os.path.expandvars(os.path.expanduser(options.dest))
+
+ if os.path.exists(options.dest) and not os.path.isdir(options.dest):
+ raise AnsibleOptionsError("%s is not a valid or accessible directory." % options.dest)
+
+ if options.sleep:
+ try:
+ secs = random.randint(0, int(options.sleep))
+ options.sleep = secs
+ except ValueError:
+ raise AnsibleOptionsError("%s is not a number." % options.sleep)
+
+ if not options.url:
+ raise AnsibleOptionsError("URL for repository not specified, use -h for help")
+
+ if options.module_name not in self.REPO_CHOICES:
+ raise AnsibleOptionsError("Unsupported repo module %s, choices are %s" % (options.module_name, ','.join(self.REPO_CHOICES)))
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options)
+
+ return options
+
+ def run(self):
+        ''' use the ansible CLI tools to check out the repo and run the playbook '''
+
+ super(PullCLI, self).run()
+
+ # log command line
+ now = datetime.datetime.now()
+ display.display(now.strftime("Starting Ansible Pull at %F %T"))
+ display.display(' '.join(sys.argv))
+
+ # Build Checkout command
+ # Now construct the ansible command
+ node = platform.node()
+ host = socket.getfqdn()
+ hostnames = ','.join(set([host, node, host.split('.')[0], node.split('.')[0]]))
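+        # limit the run to this machine under any of its known names, always
+        # including the implicit-localhost spellings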
+ if hostnames:
+ limit_opts = 'localhost,%s,127.0.0.1' % hostnames
+ else:
+ limit_opts = 'localhost,127.0.0.1'
+ base_opts = '-c local '
+ if context.CLIARGS['verbosity'] > 0:
+            base_opts += ' -%s' % ('v' * context.CLIARGS['verbosity'])
+
+ # Attempt to use the inventory passed in as an argument
+ # It might not yet have been downloaded so use localhost as default
+ inv_opts = self._get_inv_cli()
+ if not inv_opts:
+ inv_opts = " -i localhost, "
+ # avoid interpreter discovery since we already know which interpreter to use on localhost
+ inv_opts += '-e %s ' % shlex.quote('ansible_python_interpreter=%s' % sys.executable)
+
+ # SCM specific options
+ if context.CLIARGS['module_name'] == 'git':
+ repo_opts = "name=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' version=%s' % context.CLIARGS['checkout']
+
+ if context.CLIARGS['accept_host_key']:
+ repo_opts += ' accept_hostkey=yes'
+
+ if context.CLIARGS['private_key_file']:
+ repo_opts += ' key_file=%s' % context.CLIARGS['private_key_file']
+
+ if context.CLIARGS['verify']:
+ repo_opts += ' verify_commit=yes'
+
+ if context.CLIARGS['tracksubs']:
+ repo_opts += ' track_submodules=yes'
+
+ if not context.CLIARGS['fullclone']:
+ repo_opts += ' depth=1'
+ elif context.CLIARGS['module_name'] == 'subversion':
+ repo_opts = "repo=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' revision=%s' % context.CLIARGS['checkout']
+ if not context.CLIARGS['fullclone']:
+ repo_opts += ' export=yes'
+ elif context.CLIARGS['module_name'] == 'hg':
+ repo_opts = "repo=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' revision=%s' % context.CLIARGS['checkout']
+ elif context.CLIARGS['module_name'] == 'bzr':
+ repo_opts = "name=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' version=%s' % context.CLIARGS['checkout']
+ else:
+ raise AnsibleOptionsError('Unsupported (%s) SCM module for pull, choices are: %s'
+ % (context.CLIARGS['module_name'],
+ ','.join(self.REPO_CHOICES)))
+
+ # options common to all supported SCMS
+ if context.CLIARGS['clean']:
+ repo_opts += ' force=yes'
+
+ path = module_loader.find_plugin(context.CLIARGS['module_name'])
+ if path is None:
+ raise AnsibleOptionsError(("module '%s' not found.\n" % context.CLIARGS['module_name']))
+
+ bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
+ # hardcode local and inventory/host as this is just meant to fetch the repo
+ cmd = '%s/ansible %s %s -m %s -a "%s" all -l "%s"' % (bin_path, inv_opts, base_opts,
+ context.CLIARGS['module_name'],
+ repo_opts, limit_opts)
+ for ev in context.CLIARGS['extra_vars']:
+ cmd += ' -e %s' % shlex.quote(ev)
+
+ # Nap?
+ if context.CLIARGS['sleep']:
+ display.display("Sleeping for %d seconds..." % context.CLIARGS['sleep'])
+ time.sleep(context.CLIARGS['sleep'])
+
+ # RUN the Checkout command
+ display.debug("running ansible with VCS module to checkout repo")
+ display.vvvv('EXEC: %s' % cmd)
+ rc, b_out, b_err = run_cmd(cmd, live=True)
+
+ if rc != 0:
+ if context.CLIARGS['force']:
+ display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
+ else:
+ return rc
+ elif context.CLIARGS['ifchanged'] and b'"changed": true' not in b_out:
+ display.display("Repository has not changed, quitting.")
+ return 0
+
+ playbook = self.select_playbook(context.CLIARGS['dest'])
+ if playbook is None:
+ raise AnsibleOptionsError("Could not find a playbook to run.")
+
+ # Build playbook command
+ cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
+ if context.CLIARGS['vault_password_files']:
+ for vault_password_file in context.CLIARGS['vault_password_files']:
+ cmd += " --vault-password-file=%s" % vault_password_file
+ if context.CLIARGS['vault_ids']:
+ for vault_id in context.CLIARGS['vault_ids']:
+ cmd += " --vault-id=%s" % vault_id
+
+ for ev in context.CLIARGS['extra_vars']:
+ cmd += ' -e %s' % shlex.quote(ev)
+ if context.CLIARGS['become_ask_pass']:
+ cmd += ' --ask-become-pass'
+ if context.CLIARGS['skip_tags']:
+ cmd += ' --skip-tags "%s"' % to_native(u','.join(context.CLIARGS['skip_tags']))
+ if context.CLIARGS['tags']:
+ cmd += ' -t "%s"' % to_native(u','.join(context.CLIARGS['tags']))
+ if context.CLIARGS['subset']:
+ cmd += ' -l "%s"' % context.CLIARGS['subset']
+ else:
+ cmd += ' -l "%s"' % limit_opts
+ if context.CLIARGS['check']:
+ cmd += ' -C'
+ if context.CLIARGS['diff']:
+ cmd += ' -D'
+
+ os.chdir(context.CLIARGS['dest'])
+
+ # redo inventory options as new files might exist now
+ inv_opts = self._get_inv_cli()
+ if inv_opts:
+ cmd += inv_opts
+
+ # RUN THE PLAYBOOK COMMAND
+ display.debug("running ansible-playbook to do actual work")
+ display.debug('EXEC: %s' % cmd)
+ rc, b_out, b_err = run_cmd(cmd, live=True)
+
+ if context.CLIARGS['purge']:
+ os.chdir('/')
+ try:
+ shutil.rmtree(context.CLIARGS['dest'])
+ except Exception as e:
+ display.error(u"Failed to remove %s: %s" % (context.CLIARGS['dest'], to_text(e)))
+
+ return rc
+
+ @staticmethod
+ def try_playbook(path):
+ if not os.path.exists(path):
+ return 1
+ if not os.access(path, os.R_OK):
+ return 2
+ return 0
+
+ @staticmethod
+ def select_playbook(path):
+ playbook = None
+ errors = []
+ if context.CLIARGS['args'] and context.CLIARGS['args'][0] is not None:
+ playbooks = []
+ for book in context.CLIARGS['args']:
+ book_path = os.path.join(path, book)
+ rc = PullCLI.try_playbook(book_path)
+ if rc != 0:
+ errors.append("%s: %s" % (book_path, PullCLI.PLAYBOOK_ERRORS[rc]))
+ continue
+ playbooks.append(book_path)
+            if errors:
+ display.warning("\n".join(errors))
+ elif len(playbooks) == len(context.CLIARGS['args']):
+ playbook = " ".join(playbooks)
+ return playbook
+ else:
+ fqdn = socket.getfqdn()
+ hostpb = os.path.join(path, fqdn + '.yml')
+ shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml')
+ localpb = os.path.join(path, PullCLI.DEFAULT_PLAYBOOK)
+ for pb in [hostpb, shorthostpb, localpb]:
+ rc = PullCLI.try_playbook(pb)
+ if rc == 0:
+ playbook = pb
+ break
+ else:
+ errors.append("%s: %s" % (pb, PullCLI.PLAYBOOK_ERRORS[rc]))
+ if playbook is None:
+ display.warning("\n".join(errors))
+ return playbook
+
+
+def main(args=None):
+ PullCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/cli/scripts/__init__.py b/lib/ansible/cli/scripts/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/cli/scripts/__init__.py
diff --git a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
new file mode 100755
index 0000000..9109137
--- /dev/null
+++ b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
@@ -0,0 +1,354 @@
+#!/usr/bin/env python
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+import argparse
+import fcntl
+import hashlib
+import io
+import os
+import pickle
+import signal
+import socket
+import sys
+import time
+import traceback
+import errno
+import json
+
+from contextlib import contextmanager
+
+from ansible import constants as C
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.connection import Connection, ConnectionError, send_data, recv_data
+from ansible.module_utils.service import fork_process
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.loader import connection_loader
+from ansible.utils.path import unfrackpath, makedirs_safe
+from ansible.utils.display import Display
+from ansible.utils.jsonrpc import JsonRpcServer
+
+display = Display()
+
+
+def read_stream(byte_stream):
+ size = int(byte_stream.readline().strip())
+
+ data = byte_stream.read(size)
+ if len(data) < size:
+ raise Exception("EOF found before data was complete")
+
+ data_hash = to_text(byte_stream.readline().strip())
+ if data_hash != hashlib.sha1(data).hexdigest():
+ raise Exception("Read {0} bytes, but data did not match checksum".format(size))
+
+ # restore escaped loose \r characters
+ data = data.replace(br'\r', b'\r')
+
+ return data
+
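read_stream expects each payload framed as a length line, the raw bytes, and a sha1
hex checksum line, with loose \r bytes escaped. A hypothetical sketch of the matching
writer side (write_stream is illustrative, not part of this changeset):

    import hashlib

    def write_stream(byte_stream, data):
        # escape loose \r characters so read_stream can restore them
        data = data.replace(b'\r', br'\r')
        byte_stream.write(b'%d\n' % len(data))    # length line
        byte_stream.write(data)                   # payload
        byte_stream.write(hashlib.sha1(data).hexdigest().encode('ascii') + b'\n')
        byte_stream.flush()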
+
+@contextmanager
+def file_lock(lock_path):
+ """
+ Uses contextmanager to create and release a file lock based on the
+ given path. This allows us to create locks using `with file_lock()`
+ to prevent deadlocks related to failure to unlock properly.
+ """
+
+    lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT, 0o600)
+    fcntl.lockf(lock_fd, fcntl.LOCK_EX)
+    try:
+        yield
+    finally:
+        # release the lock even if the body raises, as the docstring promises
+        fcntl.lockf(lock_fd, fcntl.LOCK_UN)
+        os.close(lock_fd)
+
+
+class ConnectionProcess(object):
+ '''
+    The connection process wraps a Connection object that manages the
+    connection to a remote device, keeping it alive across the playbook run
+ '''
+ def __init__(self, fd, play_context, socket_path, original_path, task_uuid=None, ansible_playbook_pid=None):
+ self.play_context = play_context
+ self.socket_path = socket_path
+ self.original_path = original_path
+ self._task_uuid = task_uuid
+
+ self.fd = fd
+ self.exception = None
+
+ self.srv = JsonRpcServer()
+ self.sock = None
+
+ self.connection = None
+ self._ansible_playbook_pid = ansible_playbook_pid
+
+ def start(self, options):
+ messages = list()
+ result = {}
+
+ try:
+ messages.append(('vvvv', 'control socket path is %s' % self.socket_path))
+
+            # If this is a relative path (~ gets expanded later), prepend the
+            # directory we originally came from to the key's path, so we can
+            # still find it now that our cwd is /
+ if self.play_context.private_key_file and self.play_context.private_key_file[0] not in '~/':
+ self.play_context.private_key_file = os.path.join(self.original_path, self.play_context.private_key_file)
+ self.connection = connection_loader.get(self.play_context.connection, self.play_context, '/dev/null',
+ task_uuid=self._task_uuid, ansible_playbook_pid=self._ansible_playbook_pid)
+ try:
+ self.connection.set_options(direct=options)
+ except ConnectionError as exc:
+ messages.append(('debug', to_text(exc)))
+                raise ConnectionError('Unable to decode JSON from the response to set_options. See the debug log for more information.')
+
+ self.connection._socket_path = self.socket_path
+ self.srv.register(self.connection)
+ messages.extend([('vvvv', msg) for msg in sys.stdout.getvalue().splitlines()])
+
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.sock.bind(self.socket_path)
+ self.sock.listen(1)
+ messages.append(('vvvv', 'local domain socket listeners started successfully'))
+ except Exception as exc:
+ messages.extend(self.connection.pop_messages())
+ result['error'] = to_text(exc)
+ result['exception'] = traceback.format_exc()
+ finally:
+ result['messages'] = messages
+ self.fd.write(json.dumps(result, cls=AnsibleJSONEncoder))
+ self.fd.close()
+
+ def run(self):
+ try:
+ log_messages = self.connection.get_option('persistent_log_messages')
+ while not self.connection._conn_closed:
+ signal.signal(signal.SIGALRM, self.connect_timeout)
+ signal.signal(signal.SIGTERM, self.handler)
+ signal.alarm(self.connection.get_option('persistent_connect_timeout'))
+
+ self.exception = None
+ (s, addr) = self.sock.accept()
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, self.command_timeout)
+ while True:
+ data = recv_data(s)
+ if not data:
+ break
+
+ if log_messages:
+ display.display("jsonrpc request: %s" % data, log_only=True)
+
+ request = json.loads(to_text(data, errors='surrogate_or_strict'))
+ if request.get('method') == "exec_command" and not self.connection.connected:
+ self.connection._connect()
+
+ signal.alarm(self.connection.get_option('persistent_command_timeout'))
+
+ resp = self.srv.handle_request(data)
+ signal.alarm(0)
+
+ if log_messages:
+ display.display("jsonrpc response: %s" % resp, log_only=True)
+
+ send_data(s, to_bytes(resp))
+
+ s.close()
+
+ except Exception as e:
+            # socket.accept() will raise EINTR if socket.close() is called
+ if hasattr(e, 'errno'):
+ if e.errno != errno.EINTR:
+ self.exception = traceback.format_exc()
+ else:
+ self.exception = traceback.format_exc()
+
+ finally:
+            # allow time for any exception message sent over the socket to be received at the other end before shutting down
+ time.sleep(0.1)
+
+        # when done, close the connection properly and clean up the socket file so it can be recreated
+ self.shutdown()
+
+ def connect_timeout(self, signum, frame):
+ msg = 'persistent connection idle timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and ' \
+ 'Troubleshooting Guide.' % self.connection.get_option('persistent_connect_timeout')
+ display.display(msg, log_only=True)
+ raise Exception(msg)
+
+ def command_timeout(self, signum, frame):
+ msg = 'command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide.'\
+ % self.connection.get_option('persistent_command_timeout')
+ display.display(msg, log_only=True)
+ raise Exception(msg)
+
+ def handler(self, signum, frame):
+ msg = 'signal handler called with signal %s.' % signum
+ display.display(msg, log_only=True)
+ raise Exception(msg)
+
+ def shutdown(self):
+ """ Shuts down the local domain socket
+ """
+ lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(self.socket_path))
+ if os.path.exists(self.socket_path):
+ try:
+ if self.sock:
+ self.sock.close()
+ if self.connection:
+ self.connection.close()
+ if self.connection.get_option("persistent_log_messages"):
+ for _level, message in self.connection.pop_messages():
+ display.display(message, log_only=True)
+ except Exception:
+ pass
+ finally:
+ if os.path.exists(self.socket_path):
+ os.remove(self.socket_path)
+ setattr(self.connection, '_socket_path', None)
+ setattr(self.connection, '_connected', False)
+
+ if os.path.exists(lock_path):
+ os.remove(lock_path)
+
+ display.display('shutdown complete', log_only=True)
+
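The accept loop in run() services JSON-RPC 2.0 requests over the local domain socket;
the -32601 code checked later in this file is the spec's standard "method not found"
error. A sketch of the request shape, with illustrative values:

    import json

    request = {
        'jsonrpc': '2.0',
        'method': 'exec_command',   # this method triggers a lazy _connect() first
        'params': [],               # assumption: params vary per method
        'id': 1,
    }
    data = json.dumps(request).encode('utf-8')
    # a client would pass this to send_data() and read the reply with recv_data();
    # the reply is produced by JsonRpcServer.handle_request() above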
+
+def main(args=None):
+    """ Called to initiate the connection to the remote device
+ """
+
+ parser = opt_help.create_base_parser(prog='ansible-connection')
+ opt_help.add_verbosity_options(parser)
+ parser.add_argument('playbook_pid')
+ parser.add_argument('task_uuid')
+ args = parser.parse_args(args[1:] if args is not None else args)
+
+ # initialize verbosity
+ display.verbosity = args.verbosity
+
+ rc = 0
+ result = {}
+ messages = list()
+ socket_path = None
+
+ # Need stdin as a byte stream
+ stdin = sys.stdin.buffer
+
+ # Note: update the below log capture code after Display.display() is refactored.
+ saved_stdout = sys.stdout
+ sys.stdout = io.StringIO()
+
+ try:
+ # read the play context data via stdin, which means depickling it
+ opts_data = read_stream(stdin)
+ init_data = read_stream(stdin)
+
+ pc_data = pickle.loads(init_data, encoding='bytes')
+ options = pickle.loads(opts_data, encoding='bytes')
+
+ play_context = PlayContext()
+ play_context.deserialize(pc_data)
+
+ except Exception as e:
+ rc = 1
+ result.update({
+ 'error': to_text(e),
+ 'exception': traceback.format_exc()
+ })
+
+ if rc == 0:
+ ssh = connection_loader.get('ssh', class_only=True)
+ ansible_playbook_pid = args.playbook_pid
+ task_uuid = args.task_uuid
+ cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user, play_context.connection, ansible_playbook_pid)
+ # create the persistent connection dir if need be and create the paths
+ # which we will be using later
+ tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
+ makedirs_safe(tmp_path)
+
+ socket_path = unfrackpath(cp % dict(directory=tmp_path))
+ lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(socket_path))
+
+ with file_lock(lock_path):
+ if not os.path.exists(socket_path):
+ messages.append(('vvvv', 'local domain socket does not exist, starting it'))
+ original_path = os.getcwd()
+ r, w = os.pipe()
+ pid = fork_process()
+
+ if pid == 0:
+ try:
+ os.close(r)
+ wfd = os.fdopen(w, 'w')
+ process = ConnectionProcess(wfd, play_context, socket_path, original_path, task_uuid, ansible_playbook_pid)
+ process.start(options)
+ except Exception:
+ messages.append(('error', traceback.format_exc()))
+ rc = 1
+
+ if rc == 0:
+ process.run()
+ else:
+ process.shutdown()
+
+ sys.exit(rc)
+
+ else:
+ os.close(w)
+ rfd = os.fdopen(r, 'r')
+ data = json.loads(rfd.read(), cls=AnsibleJSONDecoder)
+ messages.extend(data.pop('messages'))
+ result.update(data)
+
+ else:
+ messages.append(('vvvv', 'found existing local domain socket, using it!'))
+ conn = Connection(socket_path)
+ try:
+ conn.set_options(direct=options)
+ except ConnectionError as exc:
+ messages.append(('debug', to_text(exc)))
+                    raise ConnectionError('Unable to decode JSON from the response to set_options. See the debug log for more information.')
+ pc_data = to_text(init_data)
+ try:
+ conn.update_play_context(pc_data)
+ conn.set_check_prompt(task_uuid)
+ except Exception as exc:
+                    # Only network_cli has update_play_context and set_check_prompt, so their
+                    # absence is not fatal (e.g. netconf)
+ if isinstance(exc, ConnectionError) and getattr(exc, 'code', None) == -32601:
+ pass
+ else:
+ result.update({
+ 'error': to_text(exc),
+ 'exception': traceback.format_exc()
+ })
+
+ if os.path.exists(socket_path):
+ messages.extend(Connection(socket_path).pop_messages())
+ messages.append(('vvvv', sys.stdout.getvalue()))
+ result.update({
+ 'messages': messages,
+ 'socket_path': socket_path
+ })
+
+ sys.stdout = saved_stdout
+ if 'exception' in result:
+ rc = 1
+ sys.stderr.write(json.dumps(result, cls=AnsibleJSONEncoder))
+ else:
+ rc = 0
+ sys.stdout.write(json.dumps(result, cls=AnsibleJSONEncoder))
+
+ sys.exit(rc)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py
new file mode 100755
index 0000000..3e60329
--- /dev/null
+++ b/lib/ansible/cli/vault.py
@@ -0,0 +1,480 @@
+#!/usr/bin/env python
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
+from ansible.cli import CLI
+
+import os
+import sys
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleOptionsError
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.parsing.dataloader import DataLoader
+from ansible.parsing.vault import VaultEditor, VaultLib, match_encrypt_secret
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class VaultCLI(CLI):
+ ''' can encrypt any structured data file used by Ansible.
+ This can include *group_vars/* or *host_vars/* inventory variables,
+ variables loaded by *include_vars* or *vars_files*, or variable files
+ passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*.
+ Role variables and defaults are also included!
+
+ Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault.
+ If you'd like to not expose what variables you are using, you can keep an individual task file entirely encrypted.
+ '''
+
+ name = 'ansible-vault'
+
+ FROM_STDIN = "stdin"
+ FROM_ARGS = "the command line args"
+ FROM_PROMPT = "the interactive prompt"
+
+ def __init__(self, args):
+
+ self.b_vault_pass = None
+ self.b_new_vault_pass = None
+ self.encrypt_string_read_stdin = False
+
+ self.encrypt_secret = None
+ self.encrypt_vault_id = None
+ self.new_encrypt_secret = None
+ self.new_encrypt_vault_id = None
+
+ super(VaultCLI, self).__init__(args)
+
+ def init_parser(self):
+ super(VaultCLI, self).init_parser(
+ desc="encryption/decryption utility for Ansible data files",
+ epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
+ )
+
+ common = opt_help.argparse.ArgumentParser(add_help=False)
+ opt_help.add_vault_options(common)
+ opt_help.add_verbosity_options(common)
+
+ subparsers = self.parser.add_subparsers(dest='action')
+ subparsers.required = True
+
+ output = opt_help.argparse.ArgumentParser(add_help=False)
+ output.add_argument('--output', default=None, dest='output_file',
+ help='output file name for encrypt or decrypt; use - for stdout',
+ type=opt_help.unfrack_path())
+
+ # For encrypting actions, we can also specify which of multiple vault ids should be used for encrypting
+ vault_id = opt_help.argparse.ArgumentParser(add_help=False)
+ vault_id.add_argument('--encrypt-vault-id', default=[], dest='encrypt_vault_id',
+ action='store', type=str,
+ help='the vault id used to encrypt (required if more than one vault-id is provided)')
+
+ create_parser = subparsers.add_parser('create', help='Create new vault encrypted file', parents=[vault_id, common])
+ create_parser.set_defaults(func=self.execute_create)
+ create_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt vault encrypted file', parents=[output, common])
+ decrypt_parser.set_defaults(func=self.execute_decrypt)
+ decrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ edit_parser = subparsers.add_parser('edit', help='Edit vault encrypted file', parents=[vault_id, common])
+ edit_parser.set_defaults(func=self.execute_edit)
+ edit_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ view_parser = subparsers.add_parser('view', help='View vault encrypted file', parents=[common])
+ view_parser.set_defaults(func=self.execute_view)
+ view_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt YAML file', parents=[common, output, vault_id])
+ encrypt_parser.set_defaults(func=self.execute_encrypt)
+ encrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ enc_str_parser = subparsers.add_parser('encrypt_string', help='Encrypt a string', parents=[common, output, vault_id])
+ enc_str_parser.set_defaults(func=self.execute_encrypt_string)
+ enc_str_parser.add_argument('args', help='String to encrypt', metavar='string_to_encrypt', nargs='*')
+ enc_str_parser.add_argument('-p', '--prompt', dest='encrypt_string_prompt',
+ action='store_true',
+ help="Prompt for the string to encrypt")
+ enc_str_parser.add_argument('--show-input', dest='show_string_input', default=False, action='store_true',
+ help='Do not hide input when prompted for the string to encrypt')
+ enc_str_parser.add_argument('-n', '--name', dest='encrypt_string_names',
+ action='append',
+ help="Specify the variable name")
+ enc_str_parser.add_argument('--stdin-name', dest='encrypt_string_stdin_name',
+ default=None,
+ help="Specify the variable name for stdin")
+
+ rekey_parser = subparsers.add_parser('rekey', help='Re-key a vault encrypted file', parents=[common, vault_id])
+ rekey_parser.set_defaults(func=self.execute_rekey)
+ rekey_new_group = rekey_parser.add_mutually_exclusive_group()
+ rekey_new_group.add_argument('--new-vault-password-file', default=None, dest='new_vault_password_file',
+ help="new vault password file for rekey", type=opt_help.unfrack_path())
+ rekey_new_group.add_argument('--new-vault-id', default=None, dest='new_vault_id', type=str,
+ help='the new vault identity to use for rekey')
+ rekey_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
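Taken together, the subparsers above accept invocations such as the following
(argv form; file names and values are illustrative, the flags are all defined above):

    examples = [
        ['ansible-vault', 'create', 'secrets.yml'],
        ['ansible-vault', 'encrypt', '--output', 'vars.yml.enc', 'vars.yml'],
        ['ansible-vault', 'encrypt_string', '--name', 'db_password', 'secret123'],
        ['ansible-vault', 'rekey', '--new-vault-id', 'prod@prompt', 'vars.yml'],
    ]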
+ def post_process_args(self, options):
+ options = super(VaultCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+
+ if options.vault_ids:
+ for vault_id in options.vault_ids:
+ if u';' in vault_id:
+ raise AnsibleOptionsError("'%s' is not a valid vault id. The character ';' is not allowed in vault ids" % vault_id)
+
+ if getattr(options, 'output_file', None) and len(options.args) > 1:
+ raise AnsibleOptionsError("At most one input file may be used with the --output option")
+
+ if options.action == 'encrypt_string':
+ if '-' in options.args or not options.args or options.encrypt_string_stdin_name:
+ self.encrypt_string_read_stdin = True
+
+ # TODO: prompting from stdin and reading from stdin seem mutually exclusive, but verify that.
+ if options.encrypt_string_prompt and self.encrypt_string_read_stdin:
+ raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
+
+ return options
+
+ def run(self):
+ super(VaultCLI, self).run()
+ loader = DataLoader()
+
+ # set default restrictive umask
+ old_umask = os.umask(0o077)
+
+ vault_ids = list(context.CLIARGS['vault_ids'])
+
+        # there are 3 types of actions: those that just 'read' (decrypt, view) and only
+        # need to ask for a password once; those that 'write' (create, encrypt) and
+        # ask for a new password and confirm it; and 'read/write' (rekey), which asks
+        # for the old password, then asks for a new one and confirms it.
+
+ default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
+ vault_ids = default_vault_ids + vault_ids
+
+ action = context.CLIARGS['action']
+
+ # TODO: instead of prompting for these before, we could let VaultEditor
+ # call a callback when it needs it.
+ if action in ['decrypt', 'view', 'rekey', 'edit']:
+ vault_secrets = self.setup_vault_secrets(loader, vault_ids=vault_ids,
+ vault_password_files=list(context.CLIARGS['vault_password_files']),
+ ask_vault_pass=context.CLIARGS['ask_vault_pass'])
+ if not vault_secrets:
+ raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
+
+ if action in ['encrypt', 'encrypt_string', 'create']:
+
+ encrypt_vault_id = None
+ # no --encrypt-vault-id context.CLIARGS['encrypt_vault_id'] for 'edit'
+ if action not in ['edit']:
+ encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
+
+            vault_secrets = \
+ self.setup_vault_secrets(loader,
+ vault_ids=vault_ids,
+ vault_password_files=list(context.CLIARGS['vault_password_files']),
+ ask_vault_pass=context.CLIARGS['ask_vault_pass'],
+ create_new_password=True)
+
+ if len(vault_secrets) > 1 and not encrypt_vault_id:
+ raise AnsibleOptionsError("The vault-ids %s are available to encrypt. Specify the vault-id to encrypt with --encrypt-vault-id" %
+ ','.join([x[0] for x in vault_secrets]))
+
+ if not vault_secrets:
+ raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
+
+ encrypt_secret = match_encrypt_secret(vault_secrets,
+ encrypt_vault_id=encrypt_vault_id)
+
+ # only one secret for encrypt for now, use the first vault_id and use its first secret
+ # TODO: exception if more than one?
+ self.encrypt_vault_id = encrypt_secret[0]
+ self.encrypt_secret = encrypt_secret[1]
+
+ if action in ['rekey']:
+ encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
+ # print('encrypt_vault_id: %s' % encrypt_vault_id)
+ # print('default_encrypt_vault_id: %s' % default_encrypt_vault_id)
+
+            # new_vault_ids should only ever hold one item; load the
+            # default vault ids if we are using encrypt-vault-id
+ new_vault_ids = []
+ if encrypt_vault_id:
+ new_vault_ids = default_vault_ids
+ if context.CLIARGS['new_vault_id']:
+ new_vault_ids.append(context.CLIARGS['new_vault_id'])
+
+ new_vault_password_files = []
+ if context.CLIARGS['new_vault_password_file']:
+ new_vault_password_files.append(context.CLIARGS['new_vault_password_file'])
+
+ new_vault_secrets = \
+ self.setup_vault_secrets(loader,
+ vault_ids=new_vault_ids,
+ vault_password_files=new_vault_password_files,
+ ask_vault_pass=context.CLIARGS['ask_vault_pass'],
+ create_new_password=True)
+
+ if not new_vault_secrets:
+ raise AnsibleOptionsError("A new vault password is required to use Ansible's Vault rekey")
+
+ # There is only one new_vault_id currently and one new_vault_secret, or we
+ # use the id specified in --encrypt-vault-id
+ new_encrypt_secret = match_encrypt_secret(new_vault_secrets,
+ encrypt_vault_id=encrypt_vault_id)
+
+ self.new_encrypt_vault_id = new_encrypt_secret[0]
+ self.new_encrypt_secret = new_encrypt_secret[1]
+
+ loader.set_vault_secrets(vault_secrets)
+
+        # FIXME: do we need to create VaultEditor here? it's not reused
+ vault = VaultLib(vault_secrets)
+ self.editor = VaultEditor(vault)
+
+ context.CLIARGS['func']()
+
+ # and restore umask
+ os.umask(old_umask)
+
+ def execute_encrypt(self):
+ ''' encrypt the supplied file using the provided vault secret '''
+
+ if not context.CLIARGS['args'] and sys.stdin.isatty():
+ display.display("Reading plaintext input from stdin", stderr=True)
+
+ for f in context.CLIARGS['args'] or ['-']:
+ # Fixme: use the correct vau
+ self.editor.encrypt_file(f, self.encrypt_secret,
+ vault_id=self.encrypt_vault_id,
+ output_file=context.CLIARGS['output_file'])
+
+ if sys.stdout.isatty():
+ display.display("Encryption successful", stderr=True)
+
+ @staticmethod
+ def format_ciphertext_yaml(b_ciphertext, indent=None, name=None):
+ indent = indent or 10
+
+ block_format_var_name = ""
+ if name:
+ block_format_var_name = "%s: " % name
+
+ block_format_header = "%s!vault |" % block_format_var_name
+ lines = []
+ vault_ciphertext = to_text(b_ciphertext)
+
+ lines.append(block_format_header)
+ for line in vault_ciphertext.splitlines():
+ lines.append('%s%s' % (' ' * indent, line))
+
+ yaml_ciphertext = '\n'.join(lines)
+ return yaml_ciphertext
+
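As a sketch of the result, with a hypothetical variable name and an abbreviated
ciphertext body, format_ciphertext_yaml emits a block like this (default indent
is 10 spaces):

    # format_ciphertext_yaml(b_ciphertext, name='db_password') returns:
    db_password: !vault |
              $ANSIBLE_VAULT;1.1;AES256
              3762353564...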
+ def execute_encrypt_string(self):
+ ''' encrypt the supplied string using the provided vault secret '''
+ b_plaintext = None
+
+        # Holds tuples (the_text, the_source_of_the_string, the variable name if it's provided).
+ b_plaintext_list = []
+
+ # remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so
+ # we don't add it to the plaintext list
+ args = [x for x in context.CLIARGS['args'] if x != '-']
+
+ # We can prompt and read input, or read from stdin, but not both.
+ if context.CLIARGS['encrypt_string_prompt']:
+ msg = "String to encrypt: "
+
+ name = None
+ name_prompt_response = display.prompt('Variable name (enter for no name): ')
+
+ # TODO: enforce var naming rules?
+ if name_prompt_response != "":
+ name = name_prompt_response
+
+ # TODO: could prompt for which vault_id to use for each plaintext string
+ # currently, it will just be the default
+ hide_input = not context.CLIARGS['show_string_input']
+ if hide_input:
+ msg = "String to encrypt (hidden): "
+ else:
+ msg = "String to encrypt:"
+
+ prompt_response = display.prompt(msg, private=hide_input)
+
+ if prompt_response == '':
+ raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')
+
+ b_plaintext = to_bytes(prompt_response)
+ b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name))
+
+ # read from stdin
+ if self.encrypt_string_read_stdin:
+ if sys.stdout.isatty():
+ display.display("Reading plaintext input from stdin. (ctrl-d to end input, twice if your content does not already have a newline)", stderr=True)
+
+ stdin_text = sys.stdin.read()
+ if stdin_text == '':
+ raise AnsibleOptionsError('stdin was empty, not encrypting')
+
+ if sys.stdout.isatty() and not stdin_text.endswith("\n"):
+ display.display("\n")
+
+ b_plaintext = to_bytes(stdin_text)
+
+ # defaults to None
+ name = context.CLIARGS['encrypt_string_stdin_name']
+ b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name))
+
+ # use any leftover args as strings to encrypt
+ # Try to match args up to --name options
+ if context.CLIARGS.get('encrypt_string_names', False):
+ name_and_text_list = list(zip(context.CLIARGS['encrypt_string_names'], args))
+
+ # Some but not enough --name's to name each var
+ if len(args) > len(name_and_text_list):
+                # We try to avoid ever showing the plaintext in the output, so this warning is intentionally vague.
+                display.display('The number of --name options does not match the number of args.',
+ stderr=True)
+ display.display('The last named variable will be "%s". The rest will not have'
+ ' names.' % context.CLIARGS['encrypt_string_names'][-1],
+ stderr=True)
+
+ # Add the rest of the args without specifying a name
+ for extra_arg in args[len(name_and_text_list):]:
+ name_and_text_list.append((None, extra_arg))
+
+ # if no --names are provided, just use the args without a name.
+ else:
+ name_and_text_list = [(None, x) for x in args]
+
+ # Convert the plaintext text objects to bytestrings and collect
+ for name_and_text in name_and_text_list:
+ name, plaintext = name_and_text
+
+ if plaintext == '':
+ raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting')
+
+ b_plaintext = to_bytes(plaintext)
+ b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name))
+
+ # TODO: specify vault_id per string?
+ # Format the encrypted strings and any corresponding stderr output
+ outputs = self._format_output_vault_strings(b_plaintext_list, vault_id=self.encrypt_vault_id)
+
+ b_outs = []
+ for output in outputs:
+ err = output.get('err', None)
+ out = output.get('out', '')
+ if err:
+ sys.stderr.write(err)
+ b_outs.append(to_bytes(out))
+
+ self.editor.write_data(b'\n'.join(b_outs), context.CLIARGS['output_file'] or '-')
+
+ if sys.stdout.isatty():
+ display.display("Encryption successful", stderr=True)
+
+ # TODO: offer block or string ala eyaml
+
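To make the --name pairing in execute_encrypt_string concrete, a small sketch with
illustrative values, showing how zip pairs names with positional args and how any
leftover args stay unnamed:

    names = ['app_key', 'db_password']      # from repeated --name options
    args = ['s1', 's2', 's3']               # positional strings to encrypt
    pairs = list(zip(names, args))          # [('app_key', 's1'), ('db_password', 's2')]
    pairs += [(None, extra) for extra in args[len(pairs):]]   # adds (None, 's3')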
+ def _format_output_vault_strings(self, b_plaintext_list, vault_id=None):
+        # If we are only showing one item in the output, we don't need to include commented
+        # delimiters in the text
+ show_delimiter = False
+ if len(b_plaintext_list) > 1:
+ show_delimiter = True
+
+ # list of dicts {'out': '', 'err': ''}
+ output = []
+
+ # Encrypt the plaintext, and format it into a yaml block that can be pasted into a playbook.
+ # For more than one input, show some differentiating info in the stderr output so we can tell them
+ # apart. If we have a var name, we include that in the yaml
+ for index, b_plaintext_info in enumerate(b_plaintext_list):
+ # (the text itself, which input it came from, its name)
+ b_plaintext, src, name = b_plaintext_info
+
+ b_ciphertext = self.editor.encrypt_bytes(b_plaintext, self.encrypt_secret, vault_id=vault_id)
+
+ # block formatting
+ yaml_text = self.format_ciphertext_yaml(b_ciphertext, name=name)
+
+ err_msg = None
+ if show_delimiter:
+ human_index = index + 1
+ if name:
+ err_msg = '# The encrypted version of variable ("%s", the string #%d from %s).\n' % (name, human_index, src)
+ else:
+                    err_msg = '# The encrypted version of the string #%d from %s.\n' % (human_index, src)
+ output.append({'out': yaml_text, 'err': err_msg})
+
+ return output
+
+ def execute_decrypt(self):
+ ''' decrypt the supplied file using the provided vault secret '''
+
+ if not context.CLIARGS['args'] and sys.stdin.isatty():
+ display.display("Reading ciphertext input from stdin", stderr=True)
+
+ for f in context.CLIARGS['args'] or ['-']:
+ self.editor.decrypt_file(f, output_file=context.CLIARGS['output_file'])
+
+ if sys.stdout.isatty():
+ display.display("Decryption successful", stderr=True)
+
+ def execute_create(self):
+ ''' create and open a file in an editor that will be encrypted with the provided vault secret when closed'''
+
+ if len(context.CLIARGS['args']) != 1:
+ raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
+
+ self.editor.create_file(context.CLIARGS['args'][0], self.encrypt_secret,
+ vault_id=self.encrypt_vault_id)
+
+ def execute_edit(self):
+        ''' open and decrypt an existing vaulted file in an editor; it will be encrypted again when closed '''
+ for f in context.CLIARGS['args']:
+ self.editor.edit_file(f)
+
+ def execute_view(self):
+ ''' open, decrypt and view an existing vaulted file using a pager using the supplied vault secret '''
+
+ for f in context.CLIARGS['args']:
+ # Note: vault should return byte strings because it could encrypt
+ # and decrypt binary files. We are responsible for changing it to
+ # unicode here because we are displaying it and therefore can make
+ # the decision that the display doesn't have to be precisely what
+ # the input was (leave that to decrypt instead)
+ plaintext = self.editor.plaintext(f)
+ self.pager(to_text(plaintext))
+
+ def execute_rekey(self):
+ ''' re-encrypt a vaulted file with a new secret, the previous secret is required '''
+ for f in context.CLIARGS['args']:
+ # FIXME: plumb in vault_id, use the default new_vault_secret for now
+ self.editor.rekey_file(f, self.new_encrypt_secret,
+ self.new_encrypt_vault_id)
+
+ display.display("Rekey successful", stderr=True)
+
+
+def main(args=None):
+ VaultCLI.cli_executor(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/collections/__init__.py b/lib/ansible/collections/__init__.py
new file mode 100644
index 0000000..6b3e2a7
--- /dev/null
+++ b/lib/ansible/collections/__init__.py
@@ -0,0 +1,29 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils._text import to_bytes
+
+B_FLAG_FILES = frozenset([b'MANIFEST.json', b'galaxy.yml'])
+
+
+def is_collection_path(path):
+ """
+    Verify that a path meets the minimum requirements to be a collection
+ :param path: byte-string path to evaluate for collection containment
+ :return: boolean signifying 'collectionness'
+ """
+
+ is_coll = False
+ b_path = to_bytes(path)
+ if os.path.isdir(b_path):
+ for b_flag in B_FLAG_FILES:
+ if os.path.exists(os.path.join(b_path, b_flag)):
+ is_coll = True
+ break
+
+ return is_coll
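A directory qualifies as a collection when either flag file sits at its top level.
A brief usage sketch (the path is hypothetical):

    import os

    from ansible.collections import is_collection_path

    path = os.path.expanduser('~/.ansible/collections/ansible_collections/ns/coll')
    # True only if the directory holds MANIFEST.json or galaxy.yml
    print(is_collection_path(path))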
diff --git a/lib/ansible/collections/list.py b/lib/ansible/collections/list.py
new file mode 100644
index 0000000..af3c1ca
--- /dev/null
+++ b/lib/ansible/collections/list.py
@@ -0,0 +1,114 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from collections import defaultdict
+
+from ansible.errors import AnsibleError
+from ansible.collections import is_collection_path
+from ansible.module_utils._text import to_bytes
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def list_collections(coll_filter=None, search_paths=None, dedupe=False):
+
+ collections = {}
+ for candidate in list_collection_dirs(search_paths=search_paths, coll_filter=coll_filter):
+ if os.path.exists(candidate):
+ collection = _get_collection_name_from_path(candidate)
+ if collection not in collections or not dedupe:
+ collections[collection] = candidate
+ return collections
+
+
+def list_valid_collection_paths(search_paths=None, warn=False):
+ """
+    Filter out non-existent or invalid search_paths for collections
+    :param search_paths: list of text-string paths; if None, load the default config
+ :param warn: display warning if search_path does not exist
+ :return: subset of original list
+ """
+
+ if search_paths is None:
+ search_paths = []
+
+ search_paths.extend(AnsibleCollectionConfig.collection_paths)
+
+ for path in search_paths:
+
+ b_path = to_bytes(path)
+ if not os.path.exists(b_path):
+ # warn for missing, but not if default
+ if warn:
+ display.warning("The configured collection path {0} does not exist.".format(path))
+ continue
+
+ if not os.path.isdir(b_path):
+ if warn:
+                display.warning("The configured collection path {0} exists, but it is not a directory.".format(path))
+ continue
+
+ yield path
+
+
+def list_collection_dirs(search_paths=None, coll_filter=None):
+ """
+ Return paths for the specific collections found in passed or configured search paths
+    :param search_paths: list of text-string paths; if None, load the default config
+    :param coll_filter: limit collections to just the specific namespace or collection; if None, all are returned
+ :return: list of collection directory paths
+ """
+
+ collection = None
+ namespace = None
+ if coll_filter is not None:
+ if '.' in coll_filter:
+ try:
+ (namespace, collection) = coll_filter.split('.')
+ except ValueError:
+ raise AnsibleError("Invalid collection pattern supplied: %s" % coll_filter)
+ else:
+ namespace = coll_filter
+
+ collections = defaultdict(dict)
+ for path in list_valid_collection_paths(search_paths):
+
+ if os.path.basename(path) != 'ansible_collections':
+ path = os.path.join(path, 'ansible_collections')
+
+ b_coll_root = to_bytes(path, errors='surrogate_or_strict')
+
+ if os.path.exists(b_coll_root) and os.path.isdir(b_coll_root):
+
+ if namespace is None:
+ namespaces = os.listdir(b_coll_root)
+ else:
+ namespaces = [namespace]
+
+ for ns in namespaces:
+ b_namespace_dir = os.path.join(b_coll_root, to_bytes(ns))
+
+ if os.path.isdir(b_namespace_dir):
+
+ if collection is None:
+ colls = os.listdir(b_namespace_dir)
+ else:
+ colls = [collection]
+
+ for mycoll in colls:
+
+ # skip dupe collections as they will be masked in execution
+ if mycoll not in collections[ns]:
+ b_coll = to_bytes(mycoll)
+ b_coll_dir = os.path.join(b_namespace_dir, b_coll)
+ if is_collection_path(b_coll_dir):
+ collections[ns][mycoll] = b_coll_dir
+ yield b_coll_dir
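A short usage sketch tying these helpers together (the filter value is illustrative):

    from ansible.collections.list import list_collections

    # maps fully qualified collection names to the directory that provides them;
    # coll_filter narrows the scan to a namespace or a namespace.collection pair
    for name, path in list_collections(coll_filter='community.general').items():
        print(name, path)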
diff --git a/lib/ansible/compat/__init__.py b/lib/ansible/compat/__init__.py
new file mode 100644
index 0000000..2990c6f
--- /dev/null
+++ b/lib/ansible/compat/__init__.py
@@ -0,0 +1,26 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat library for ansible. This contains compatibility definitions for older python.
+When we need to import a module differently depending on python version, do it
+here. Then in the code we can simply import from compat in order to get what we want.
+'''
diff --git a/lib/ansible/compat/selectors/__init__.py b/lib/ansible/compat/selectors/__init__.py
new file mode 100644
index 0000000..a7b260e
--- /dev/null
+++ b/lib/ansible/compat/selectors/__init__.py
@@ -0,0 +1,32 @@
+# (c) 2014, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# NOT_BUNDLED
+
+'''
+Compat selectors library. Python 3.5 has this built in. The selectors2
+package exists on pypi to backport the functionality as far back as python 2.6.
+The implementation previously resided here; this file is maintained after the
+move to ansible.module_utils for code backwards compatibility.
+'''
+import sys
+from ansible.module_utils.compat import selectors
+sys.modules['ansible.compat.selectors'] = selectors
diff --git a/lib/ansible/config/__init__.py b/lib/ansible/config/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/config/__init__.py
diff --git a/lib/ansible/config/ansible_builtin_runtime.yml b/lib/ansible/config/ansible_builtin_runtime.yml
new file mode 100644
index 0000000..e7c4f03
--- /dev/null
+++ b/lib/ansible/config/ansible_builtin_runtime.yml
@@ -0,0 +1,9742 @@
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+plugin_routing:
+ connection:
+ # test entries
+ redirected_local:
+ redirect: ansible.builtin.local
+ buildah:
+ redirect: containers.podman.buildah
+ podman:
+ redirect: containers.podman.podman
+ aws_ssm:
+ redirect: community.aws.aws_ssm
+ chroot:
+ redirect: community.general.chroot
+ docker:
+ redirect: community.docker.docker
+ funcd:
+ redirect: community.general.funcd
+ iocage:
+ redirect: community.general.iocage
+ jail:
+ redirect: community.general.jail
+ kubectl:
+ redirect: kubernetes.core.kubectl
+ libvirt_lxc:
+ redirect: community.libvirt.libvirt_lxc
+ lxc:
+ redirect: community.general.lxc
+ lxd:
+ redirect: community.general.lxd
+ oc:
+ redirect: community.okd.oc
+ qubes:
+ redirect: community.general.qubes
+ saltstack:
+ redirect: community.general.saltstack
+ zone:
+ redirect: community.general.zone
+ vmware_tools:
+ redirect: community.vmware.vmware_tools
+ httpapi:
+ redirect: ansible.netcommon.httpapi
+ napalm:
+ redirect: ansible.netcommon.napalm
+ netconf:
+ redirect: ansible.netcommon.netconf
+ network_cli:
+ redirect: ansible.netcommon.network_cli
+ persistent:
+ redirect: ansible.netcommon.persistent
+ modules:
+ # test entry
+ formerly_core_ping:
+ redirect: testns.testcoll.ping
+ # test entry
+ uses_redirected_action:
+ redirect: ansible.builtin.ping
+ podman_container_info:
+ redirect: containers.podman.podman_container_info
+ podman_image_info:
+ redirect: containers.podman.podman_image_info
+ podman_image:
+ redirect: containers.podman.podman_image
+ podman_volume_info:
+ redirect: containers.podman.podman_volume_info
+ frr_facts:
+ redirect: frr.frr.frr_facts
+ frr_bgp:
+ redirect: frr.frr.frr_bgp
+ apt_repo:
+ redirect: community.general.apt_repo
+ aws_acm_facts:
+ redirect: community.aws.aws_acm_facts
+ aws_kms_facts:
+ redirect: community.aws.aws_kms_facts
+ aws_region_facts:
+ redirect: community.aws.aws_region_facts
+ aws_s3_bucket_facts:
+ redirect: community.aws.aws_s3_bucket_facts
+ aws_sgw_facts:
+ redirect: community.aws.aws_sgw_facts
+ aws_waf_facts:
+ redirect: community.aws.aws_waf_facts
+ cloudfront_facts:
+ redirect: community.aws.cloudfront_facts
+ cloudwatchlogs_log_group_facts:
+ redirect: community.aws.cloudwatchlogs_log_group_facts
+ ec2_asg_facts:
+ redirect: community.aws.ec2_asg_facts
+ ec2_customer_gateway_facts:
+ redirect: community.aws.ec2_customer_gateway_facts
+ ec2_instance_facts:
+ redirect: community.aws.ec2_instance_facts
+ ec2_eip_facts:
+ redirect: community.aws.ec2_eip_facts
+ ec2_elb_facts:
+ redirect: community.aws.ec2_elb_facts
+ ec2_lc_facts:
+ redirect: community.aws.ec2_lc_facts
+ ec2_placement_group_facts:
+ redirect: community.aws.ec2_placement_group_facts
+ ec2_vpc_endpoint_facts:
+ redirect: community.aws.ec2_vpc_endpoint_facts
+ ec2_vpc_igw_facts:
+ redirect: community.aws.ec2_vpc_igw_facts
+ ec2_vpc_nacl_facts:
+ redirect: community.aws.ec2_vpc_nacl_facts
+ ec2_vpc_nat_gateway_facts:
+ redirect: community.aws.ec2_vpc_nat_gateway_facts
+ ec2_vpc_peering_facts:
+ redirect: community.aws.ec2_vpc_peering_facts
+ ec2_vpc_route_table_facts:
+ redirect: community.aws.ec2_vpc_route_table_facts
+ ec2_vpc_vgw_facts:
+ redirect: community.aws.ec2_vpc_vgw_facts
+ ec2_vpc_vpn_facts:
+ redirect: community.aws.ec2_vpc_vpn_facts
+ ecs_service_facts:
+ redirect: community.aws.ecs_service_facts
+ ecs_taskdefinition_facts:
+ redirect: community.aws.ecs_taskdefinition_facts
+ efs_facts:
+ redirect: community.aws.efs_facts
+ elasticache_facts:
+ redirect: community.aws.elasticache_facts
+ elb_application_lb_facts:
+ redirect: community.aws.elb_application_lb_facts
+ elb_classic_lb_facts:
+ redirect: community.aws.elb_classic_lb_facts
+ elb_target_facts:
+ redirect: community.aws.elb_target_facts
+ elb_target_group_facts:
+ redirect: community.aws.elb_target_group_facts
+ iam_cert_facts:
+ redirect: community.aws.iam_cert_facts
+ iam_mfa_device_facts:
+ redirect: community.aws.iam_mfa_device_facts
+ iam_role_facts:
+ redirect: community.aws.iam_role_facts
+ iam_server_certificate_facts:
+ redirect: community.aws.iam_server_certificate_facts
+ lambda_facts:
+ redirect: community.aws.lambda_facts
+ rds_instance_facts:
+ redirect: community.aws.rds_instance_facts
+ rds_snapshot_facts:
+ redirect: community.aws.rds_snapshot_facts
+ redshift_facts:
+ redirect: community.aws.redshift_facts
+ route53_facts:
+ redirect: community.aws.route53_facts
+ aws_acm:
+ redirect: community.aws.aws_acm
+ aws_acm_info:
+ redirect: community.aws.aws_acm_info
+ aws_api_gateway:
+ redirect: community.aws.aws_api_gateway
+ aws_application_scaling_policy:
+ redirect: community.aws.aws_application_scaling_policy
+ aws_batch_compute_environment:
+ redirect: community.aws.aws_batch_compute_environment
+ aws_batch_job_definition:
+ redirect: community.aws.aws_batch_job_definition
+ aws_batch_job_queue:
+ redirect: community.aws.aws_batch_job_queue
+ aws_codebuild:
+ redirect: community.aws.aws_codebuild
+ aws_codecommit:
+ redirect: community.aws.aws_codecommit
+ aws_codepipeline:
+ redirect: community.aws.aws_codepipeline
+ aws_config_aggregation_authorization:
+ redirect: community.aws.aws_config_aggregation_authorization
+ aws_config_aggregator:
+ redirect: community.aws.aws_config_aggregator
+ aws_config_delivery_channel:
+ redirect: community.aws.aws_config_delivery_channel
+ aws_config_recorder:
+ redirect: community.aws.aws_config_recorder
+ aws_config_rule:
+ redirect: community.aws.aws_config_rule
+ aws_direct_connect_connection:
+ redirect: community.aws.aws_direct_connect_connection
+ aws_direct_connect_gateway:
+ redirect: community.aws.aws_direct_connect_gateway
+ aws_direct_connect_link_aggregation_group:
+ redirect: community.aws.aws_direct_connect_link_aggregation_group
+ aws_direct_connect_virtual_interface:
+ redirect: community.aws.aws_direct_connect_virtual_interface
+ aws_eks_cluster:
+ redirect: community.aws.aws_eks_cluster
+ aws_elasticbeanstalk_app:
+ redirect: community.aws.aws_elasticbeanstalk_app
+ aws_glue_connection:
+ redirect: community.aws.aws_glue_connection
+ aws_glue_job:
+ redirect: community.aws.aws_glue_job
+ aws_inspector_target:
+ redirect: community.aws.aws_inspector_target
+ aws_kms:
+ redirect: community.aws.aws_kms
+ aws_kms_info:
+ redirect: community.aws.aws_kms_info
+ aws_region_info:
+ redirect: community.aws.aws_region_info
+ aws_s3_bucket_info:
+ redirect: community.aws.aws_s3_bucket_info
+ aws_s3_cors:
+ redirect: community.aws.aws_s3_cors
+ aws_secret:
+ redirect: community.aws.aws_secret
+ aws_ses_identity:
+ redirect: community.aws.aws_ses_identity
+ aws_ses_identity_policy:
+ redirect: community.aws.aws_ses_identity_policy
+ aws_ses_rule_set:
+ redirect: community.aws.aws_ses_rule_set
+ aws_sgw_info:
+ redirect: community.aws.aws_sgw_info
+ aws_ssm_parameter_store:
+ redirect: community.aws.aws_ssm_parameter_store
+ aws_step_functions_state_machine:
+ redirect: community.aws.aws_step_functions_state_machine
+ aws_step_functions_state_machine_execution:
+ redirect: community.aws.aws_step_functions_state_machine_execution
+ aws_waf_condition:
+ redirect: community.aws.aws_waf_condition
+ aws_waf_info:
+ redirect: community.aws.aws_waf_info
+ aws_waf_rule:
+ redirect: community.aws.aws_waf_rule
+ aws_waf_web_acl:
+ redirect: community.aws.aws_waf_web_acl
+ cloudformation_stack_set:
+ redirect: community.aws.cloudformation_stack_set
+ cloudformation_exports_info:
+ redirect: community.aws.cloudformation_exports_info
+ cloudfront_distribution:
+ redirect: community.aws.cloudfront_distribution
+ cloudfront_info:
+ redirect: community.aws.cloudfront_info
+ cloudfront_invalidation:
+ redirect: community.aws.cloudfront_invalidation
+ cloudfront_origin_access_identity:
+ redirect: community.aws.cloudfront_origin_access_identity
+ cloudtrail:
+ redirect: community.aws.cloudtrail
+ cloudwatchevent_rule:
+ redirect: community.aws.cloudwatchevent_rule
+ cloudwatchlogs_log_group:
+ redirect: community.aws.cloudwatchlogs_log_group
+ cloudwatchlogs_log_group_info:
+ redirect: community.aws.cloudwatchlogs_log_group_info
+ cloudwatchlogs_log_group_metric_filter:
+ redirect: community.aws.cloudwatchlogs_log_group_metric_filter
+ data_pipeline:
+ redirect: community.aws.data_pipeline
+ dms_endpoint:
+ redirect: community.aws.dms_endpoint
+ dms_replication_subnet_group:
+ redirect: community.aws.dms_replication_subnet_group
+ dynamodb_table:
+ redirect: community.aws.dynamodb_table
+ dynamodb_ttl:
+ redirect: community.aws.dynamodb_ttl
+ ec2_ami_copy:
+ redirect: community.aws.ec2_ami_copy
+ ec2_asg:
+ redirect: community.aws.ec2_asg
+ ec2_asg_info:
+ redirect: community.aws.ec2_asg_info
+ ec2_asg_lifecycle_hook:
+ redirect: community.aws.ec2_asg_lifecycle_hook
+ ec2_customer_gateway:
+ redirect: community.aws.ec2_customer_gateway
+ ec2_customer_gateway_info:
+ redirect: community.aws.ec2_customer_gateway_info
+ ec2_eip:
+ redirect: community.aws.ec2_eip
+ ec2_eip_info:
+ redirect: community.aws.ec2_eip_info
+ ec2_elb:
+ redirect: community.aws.ec2_elb
+ ec2_elb_info:
+ redirect: community.aws.ec2_elb_info
+ ec2_instance:
+ redirect: community.aws.ec2_instance
+ ec2_instance_info:
+ redirect: community.aws.ec2_instance_info
+ ec2_launch_template:
+ redirect: community.aws.ec2_launch_template
+ ec2_lc:
+ redirect: community.aws.ec2_lc
+ ec2_lc_find:
+ redirect: community.aws.ec2_lc_find
+ ec2_lc_info:
+ redirect: community.aws.ec2_lc_info
+ ec2_metric_alarm:
+ redirect: community.aws.ec2_metric_alarm
+ ec2_placement_group:
+ redirect: community.aws.ec2_placement_group
+ ec2_placement_group_info:
+ redirect: community.aws.ec2_placement_group_info
+ ec2_scaling_policy:
+ redirect: community.aws.ec2_scaling_policy
+ ec2_snapshot_copy:
+ redirect: community.aws.ec2_snapshot_copy
+ ec2_transit_gateway:
+ redirect: community.aws.ec2_transit_gateway
+ ec2_transit_gateway_info:
+ redirect: community.aws.ec2_transit_gateway_info
+ ec2_vpc_egress_igw:
+ redirect: community.aws.ec2_vpc_egress_igw
+ ec2_vpc_endpoint:
+ redirect: community.aws.ec2_vpc_endpoint
+ ec2_vpc_endpoint_info:
+ redirect: community.aws.ec2_vpc_endpoint_info
+ ec2_vpc_igw:
+ redirect: community.aws.ec2_vpc_igw
+ ec2_vpc_igw_info:
+ redirect: community.aws.ec2_vpc_igw_info
+ ec2_vpc_nacl:
+ redirect: community.aws.ec2_vpc_nacl
+ ec2_vpc_nacl_info:
+ redirect: community.aws.ec2_vpc_nacl_info
+ ec2_vpc_nat_gateway:
+ redirect: community.aws.ec2_vpc_nat_gateway
+ ec2_vpc_nat_gateway_info:
+ redirect: community.aws.ec2_vpc_nat_gateway_info
+ ec2_vpc_peer:
+ redirect: community.aws.ec2_vpc_peer
+ ec2_vpc_peering_info:
+ redirect: community.aws.ec2_vpc_peering_info
+ ec2_vpc_route_table:
+ redirect: community.aws.ec2_vpc_route_table
+ ec2_vpc_route_table_info:
+ redirect: community.aws.ec2_vpc_route_table_info
+ ec2_vpc_vgw:
+ redirect: community.aws.ec2_vpc_vgw
+ ec2_vpc_vgw_info:
+ redirect: community.aws.ec2_vpc_vgw_info
+ ec2_vpc_vpn:
+ redirect: community.aws.ec2_vpc_vpn
+ ec2_vpc_vpn_info:
+ redirect: community.aws.ec2_vpc_vpn_info
+ ec2_win_password:
+ redirect: community.aws.ec2_win_password
+ ecs_attribute:
+ redirect: community.aws.ecs_attribute
+ ecs_cluster:
+ redirect: community.aws.ecs_cluster
+ ecs_ecr:
+ redirect: community.aws.ecs_ecr
+ ecs_service:
+ redirect: community.aws.ecs_service
+ ecs_service_info:
+ redirect: community.aws.ecs_service_info
+ ecs_tag:
+ redirect: community.aws.ecs_tag
+ ecs_task:
+ redirect: community.aws.ecs_task
+ ecs_taskdefinition:
+ redirect: community.aws.ecs_taskdefinition
+ ecs_taskdefinition_info:
+ redirect: community.aws.ecs_taskdefinition_info
+ efs:
+ redirect: community.aws.efs
+ efs_info:
+ redirect: community.aws.efs_info
+ elasticache:
+ redirect: community.aws.elasticache
+ elasticache_info:
+ redirect: community.aws.elasticache_info
+ elasticache_parameter_group:
+ redirect: community.aws.elasticache_parameter_group
+ elasticache_snapshot:
+ redirect: community.aws.elasticache_snapshot
+ elasticache_subnet_group:
+ redirect: community.aws.elasticache_subnet_group
+ elb_application_lb:
+ redirect: community.aws.elb_application_lb
+ elb_application_lb_info:
+ redirect: community.aws.elb_application_lb_info
+ elb_classic_lb:
+ redirect: community.aws.elb_classic_lb
+ elb_classic_lb_info:
+ redirect: community.aws.elb_classic_lb_info
+ elb_instance:
+ redirect: community.aws.elb_instance
+ elb_network_lb:
+ redirect: community.aws.elb_network_lb
+ elb_target:
+ redirect: community.aws.elb_target
+ elb_target_group:
+ redirect: community.aws.elb_target_group
+ elb_target_group_info:
+ redirect: community.aws.elb_target_group_info
+ elb_target_info:
+ redirect: community.aws.elb_target_info
+ execute_lambda:
+ redirect: community.aws.execute_lambda
+ iam:
+ redirect: community.aws.iam
+ iam_cert:
+ redirect: community.aws.iam_cert
+ iam_group:
+ redirect: community.aws.iam_group
+ iam_managed_policy:
+ redirect: community.aws.iam_managed_policy
+ iam_mfa_device_info:
+ redirect: community.aws.iam_mfa_device_info
+ iam_password_policy:
+ redirect: community.aws.iam_password_policy
+ iam_policy:
+ redirect: community.aws.iam_policy
+ iam_policy_info:
+ redirect: community.aws.iam_policy_info
+ iam_role:
+ redirect: community.aws.iam_role
+ iam_role_info:
+ redirect: community.aws.iam_role_info
+ iam_saml_federation:
+ redirect: community.aws.iam_saml_federation
+ iam_server_certificate_info:
+ redirect: community.aws.iam_server_certificate_info
+ iam_user:
+ redirect: community.aws.iam_user
+ iam_user_info:
+ redirect: community.aws.iam_user_info
+ kinesis_stream:
+ redirect: community.aws.kinesis_stream
+ lambda:
+ redirect: community.aws.lambda
+ lambda_alias:
+ redirect: community.aws.lambda_alias
+ lambda_event:
+ redirect: community.aws.lambda_event
+ lambda_info:
+ redirect: community.aws.lambda_info
+ lambda_policy:
+ redirect: community.aws.lambda_policy
+ lightsail:
+ redirect: community.aws.lightsail
+ rds:
+ redirect: community.aws.rds
+ rds_instance:
+ redirect: community.aws.rds_instance
+ rds_instance_info:
+ redirect: community.aws.rds_instance_info
+ rds_param_group:
+ redirect: community.aws.rds_param_group
+ rds_snapshot:
+ redirect: community.aws.rds_snapshot
+ rds_snapshot_info:
+ redirect: community.aws.rds_snapshot_info
+ rds_subnet_group:
+ redirect: community.aws.rds_subnet_group
+ redshift:
+ redirect: community.aws.redshift
+ redshift_cross_region_snapshots:
+ redirect: community.aws.redshift_cross_region_snapshots
+ redshift_info:
+ redirect: community.aws.redshift_info
+ redshift_subnet_group:
+ redirect: community.aws.redshift_subnet_group
+ route53:
+ redirect: community.aws.route53
+ route53_health_check:
+ redirect: community.aws.route53_health_check
+ route53_info:
+ redirect: community.aws.route53_info
+ route53_zone:
+ redirect: community.aws.route53_zone
+ s3_bucket_notification:
+ redirect: community.aws.s3_bucket_notification
+ s3_lifecycle:
+ redirect: community.aws.s3_lifecycle
+ s3_logging:
+ redirect: community.aws.s3_logging
+ s3_sync:
+ redirect: community.aws.s3_sync
+ s3_website:
+ redirect: community.aws.s3_website
+ sns:
+ redirect: community.aws.sns
+ sns_topic:
+ redirect: community.aws.sns_topic
+ sqs_queue:
+ redirect: community.aws.sqs_queue
+ sts_assume_role:
+ redirect: community.aws.sts_assume_role
+ sts_session_token:
+ redirect: community.aws.sts_session_token
+ ali_instance_facts:
+ redirect: community.general.ali_instance_facts
+ ali_instance:
+ redirect: community.general.ali_instance
+ ali_instance_info:
+ redirect: community.general.ali_instance_info
+ atomic_container:
+ redirect: community.general.atomic_container
+ atomic_host:
+ redirect: community.general.atomic_host
+ atomic_image:
+ redirect: community.general.atomic_image
+ clc_aa_policy:
+ redirect: community.general.clc_aa_policy
+ clc_alert_policy:
+ redirect: community.general.clc_alert_policy
+ clc_blueprint_package:
+ redirect: community.general.clc_blueprint_package
+ clc_firewall_policy:
+ redirect: community.general.clc_firewall_policy
+ clc_group:
+ redirect: community.general.clc_group
+ clc_loadbalancer:
+ redirect: community.general.clc_loadbalancer
+ clc_modify_server:
+ redirect: community.general.clc_modify_server
+ clc_publicip:
+ redirect: community.general.clc_publicip
+ clc_server:
+ redirect: community.general.clc_server
+ clc_server_snapshot:
+ redirect: community.general.clc_server_snapshot
+ cloudscale_floating_ip:
+ redirect: cloudscale_ch.cloud.floating_ip
+ cloudscale_server:
+ redirect: cloudscale_ch.cloud.server
+ cloudscale_server_group:
+ redirect: cloudscale_ch.cloud.server_group
+ cloudscale_volume:
+ redirect: cloudscale_ch.cloud.volume
+ cs_instance_facts:
+ redirect: ngine_io.cloudstack.cs_instance_info
+ cs_zone_facts:
+ redirect: ngine_io.cloudstack.cs_zone_info
+ cs_account:
+ redirect: ngine_io.cloudstack.cs_account
+ cs_affinitygroup:
+ redirect: ngine_io.cloudstack.cs_affinitygroup
+ cs_cluster:
+ redirect: ngine_io.cloudstack.cs_cluster
+ cs_configuration:
+ redirect: ngine_io.cloudstack.cs_configuration
+ cs_disk_offering:
+ redirect: ngine_io.cloudstack.cs_disk_offering
+ cs_domain:
+ redirect: ngine_io.cloudstack.cs_domain
+ cs_facts:
+ redirect: ngine_io.cloudstack.cs_facts
+ cs_firewall:
+ redirect: ngine_io.cloudstack.cs_firewall
+ cs_host:
+ redirect: ngine_io.cloudstack.cs_host
+ cs_image_store:
+ redirect: ngine_io.cloudstack.cs_image_store
+ cs_instance:
+ redirect: ngine_io.cloudstack.cs_instance
+ cs_instance_info:
+ redirect: ngine_io.cloudstack.cs_instance_info
+ cs_instance_nic:
+ redirect: ngine_io.cloudstack.cs_instance_nic
+ cs_instance_nic_secondaryip:
+ redirect: ngine_io.cloudstack.cs_instance_nic_secondaryip
+ cs_instance_password_reset:
+ redirect: ngine_io.cloudstack.cs_instance_password_reset
+ cs_instancegroup:
+ redirect: ngine_io.cloudstack.cs_instancegroup
+ cs_ip_address:
+ redirect: ngine_io.cloudstack.cs_ip_address
+ cs_iso:
+ redirect: ngine_io.cloudstack.cs_iso
+ cs_loadbalancer_rule:
+ redirect: ngine_io.cloudstack.cs_loadbalancer_rule
+ cs_loadbalancer_rule_member:
+ redirect: ngine_io.cloudstack.cs_loadbalancer_rule_member
+ cs_network:
+ redirect: ngine_io.cloudstack.cs_network
+ cs_network_acl:
+ redirect: ngine_io.cloudstack.cs_network_acl
+ cs_network_acl_rule:
+ redirect: ngine_io.cloudstack.cs_network_acl_rule
+ cs_network_offering:
+ redirect: ngine_io.cloudstack.cs_network_offering
+ cs_physical_network:
+ redirect: ngine_io.cloudstack.cs_physical_network
+ cs_pod:
+ redirect: ngine_io.cloudstack.cs_pod
+ cs_portforward:
+ redirect: ngine_io.cloudstack.cs_portforward
+ cs_project:
+ redirect: ngine_io.cloudstack.cs_project
+ cs_region:
+ redirect: ngine_io.cloudstack.cs_region
+ cs_resourcelimit:
+ redirect: ngine_io.cloudstack.cs_resourcelimit
+ cs_role:
+ redirect: ngine_io.cloudstack.cs_role
+ cs_role_permission:
+ redirect: ngine_io.cloudstack.cs_role_permission
+ cs_router:
+ redirect: ngine_io.cloudstack.cs_router
+ cs_securitygroup:
+ redirect: ngine_io.cloudstack.cs_securitygroup
+ cs_securitygroup_rule:
+ redirect: ngine_io.cloudstack.cs_securitygroup_rule
+ cs_service_offering:
+ redirect: ngine_io.cloudstack.cs_service_offering
+ cs_snapshot_policy:
+ redirect: ngine_io.cloudstack.cs_snapshot_policy
+ cs_sshkeypair:
+ redirect: ngine_io.cloudstack.cs_sshkeypair
+ cs_staticnat:
+ redirect: ngine_io.cloudstack.cs_staticnat
+ cs_storage_pool:
+ redirect: ngine_io.cloudstack.cs_storage_pool
+ cs_template:
+ redirect: ngine_io.cloudstack.cs_template
+ cs_traffic_type:
+ redirect: ngine_io.cloudstack.cs_traffic_type
+ cs_user:
+ redirect: ngine_io.cloudstack.cs_user
+ cs_vlan_ip_range:
+ redirect: ngine_io.cloudstack.cs_vlan_ip_range
+ cs_vmsnapshot:
+ redirect: ngine_io.cloudstack.cs_vmsnapshot
+ cs_volume:
+ redirect: ngine_io.cloudstack.cs_volume
+ cs_vpc:
+ redirect: ngine_io.cloudstack.cs_vpc
+ cs_vpc_offering:
+ redirect: ngine_io.cloudstack.cs_vpc_offering
+ cs_vpn_connection:
+ redirect: ngine_io.cloudstack.cs_vpn_connection
+ cs_vpn_customer_gateway:
+ redirect: ngine_io.cloudstack.cs_vpn_customer_gateway
+ cs_vpn_gateway:
+ redirect: ngine_io.cloudstack.cs_vpn_gateway
+ cs_zone:
+ redirect: ngine_io.cloudstack.cs_zone
+ cs_zone_info:
+ redirect: ngine_io.cloudstack.cs_zone_info
+ digital_ocean:
+ redirect: community.digitalocean.digital_ocean
+ digital_ocean_account_facts:
+ redirect: community.digitalocean.digital_ocean_account_facts
+ digital_ocean_certificate_facts:
+ redirect: community.digitalocean.digital_ocean_certificate_facts
+ digital_ocean_domain_facts:
+ redirect: community.digitalocean.digital_ocean_domain_facts
+ digital_ocean_firewall_facts:
+ redirect: community.digitalocean.digital_ocean_firewall_facts
+ digital_ocean_floating_ip_facts:
+ redirect: community.digitalocean.digital_ocean_floating_ip_facts
+ digital_ocean_image_facts:
+ redirect: community.digitalocean.digital_ocean_image_facts
+ digital_ocean_load_balancer_facts:
+ redirect: community.digitalocean.digital_ocean_load_balancer_facts
+ digital_ocean_region_facts:
+ redirect: community.digitalocean.digital_ocean_region_facts
+ digital_ocean_size_facts:
+ redirect: community.digitalocean.digital_ocean_size_facts
+ digital_ocean_snapshot_facts:
+ redirect: community.digitalocean.digital_ocean_snapshot_facts
+ digital_ocean_sshkey_facts:
+ redirect: community.digitalocean.digital_ocean_sshkey_facts
+ digital_ocean_tag_facts:
+ redirect: community.digitalocean.digital_ocean_tag_facts
+ digital_ocean_volume_facts:
+ redirect: community.digitalocean.digital_ocean_volume_facts
+ digital_ocean_account_info:
+ redirect: community.digitalocean.digital_ocean_account_info
+ digital_ocean_block_storage:
+ redirect: community.digitalocean.digital_ocean_block_storage
+ digital_ocean_certificate:
+ redirect: community.digitalocean.digital_ocean_certificate
+ digital_ocean_certificate_info:
+ redirect: community.digitalocean.digital_ocean_certificate_info
+ digital_ocean_domain:
+ redirect: community.digitalocean.digital_ocean_domain
+ digital_ocean_domain_info:
+ redirect: community.digitalocean.digital_ocean_domain_info
+ digital_ocean_droplet:
+ redirect: community.digitalocean.digital_ocean_droplet
+ digital_ocean_firewall_info:
+ redirect: community.digitalocean.digital_ocean_firewall_info
+ digital_ocean_floating_ip:
+ redirect: community.digitalocean.digital_ocean_floating_ip
+ digital_ocean_floating_ip_info:
+ redirect: community.digitalocean.digital_ocean_floating_ip_info
+ digital_ocean_image_info:
+ redirect: community.digitalocean.digital_ocean_image_info
+ digital_ocean_load_balancer_info:
+ redirect: community.digitalocean.digital_ocean_load_balancer_info
+ digital_ocean_region_info:
+ redirect: community.digitalocean.digital_ocean_region_info
+ digital_ocean_size_info:
+ redirect: community.digitalocean.digital_ocean_size_info
+ digital_ocean_snapshot_info:
+ redirect: community.digitalocean.digital_ocean_snapshot_info
+ digital_ocean_sshkey:
+ redirect: community.digitalocean.digital_ocean_sshkey
+ digital_ocean_sshkey_info:
+ redirect: community.digitalocean.digital_ocean_sshkey_info
+ digital_ocean_tag:
+ redirect: community.digitalocean.digital_ocean_tag
+ digital_ocean_tag_info:
+ redirect: community.digitalocean.digital_ocean_tag_info
+ digital_ocean_volume_info:
+ redirect: community.digitalocean.digital_ocean_volume_info
+ dimensiondata_network:
+ redirect: community.general.dimensiondata_network
+ dimensiondata_vlan:
+ redirect: community.general.dimensiondata_vlan
+ docker_image_facts:
+ redirect: community.general.docker_image_facts
+ docker_service:
+ redirect: community.general.docker_service
+ docker_compose:
+ redirect: community.docker.docker_compose
+ docker_config:
+ redirect: community.docker.docker_config
+ docker_container:
+ redirect: community.docker.docker_container
+ docker_container_info:
+ redirect: community.docker.docker_container_info
+ docker_host_info:
+ redirect: community.docker.docker_host_info
+ docker_image:
+ redirect: community.docker.docker_image
+ docker_image_info:
+ redirect: community.docker.docker_image_info
+ docker_login:
+ redirect: community.docker.docker_login
+ docker_network:
+ redirect: community.docker.docker_network
+ docker_network_info:
+ redirect: community.docker.docker_network_info
+ docker_node:
+ redirect: community.docker.docker_node
+ docker_node_info:
+ redirect: community.docker.docker_node_info
+ docker_prune:
+ redirect: community.docker.docker_prune
+ docker_secret:
+ redirect: community.docker.docker_secret
+ docker_stack:
+ redirect: community.docker.docker_stack
+ docker_swarm:
+ redirect: community.docker.docker_swarm
+ docker_swarm_info:
+ redirect: community.docker.docker_swarm_info
+ docker_swarm_service:
+ redirect: community.docker.docker_swarm_service
+ docker_swarm_service_info:
+ redirect: community.docker.docker_swarm_service_info
+ docker_volume:
+ redirect: community.docker.docker_volume
+ docker_volume_info:
+ redirect: community.docker.docker_volume_info
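+ # Editorial note (illustrative sketch, assumes the community.docker
+ # collection is installed): with the entries above, a task written as
+ #   - docker_container:
+ #       name: web
+ #       image: nginx
+ #       state: started
+ # resolves at runtime to community.docker.docker_container, so existing
+ # playbooks keep working without being rewritten to the new FQCN.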
+ gcdns_record:
+ redirect: community.general.gcdns_record
+ gcdns_zone:
+ redirect: community.general.gcdns_zone
+ gce:
+ redirect: community.general.gce
+ gcp_backend_service:
+ redirect: community.general.gcp_backend_service
+ gcp_bigquery_dataset_facts:
+ redirect: google.cloud.gcp_bigquery_dataset_info
+ gcp_bigquery_table_facts:
+ redirect: google.cloud.gcp_bigquery_table_info
+ gcp_cloudbuild_trigger_facts:
+ redirect: google.cloud.gcp_cloudbuild_trigger_info
+ gcp_compute_address_facts:
+ redirect: google.cloud.gcp_compute_address_info
+ gcp_compute_backend_bucket_facts:
+ redirect: google.cloud.gcp_compute_backend_bucket_info
+ gcp_compute_backend_service_facts:
+ redirect: google.cloud.gcp_compute_backend_service_info
+ gcp_compute_disk_facts:
+ redirect: google.cloud.gcp_compute_disk_info
+ gcp_compute_firewall_facts:
+ redirect: google.cloud.gcp_compute_firewall_info
+ gcp_compute_forwarding_rule_facts:
+ redirect: google.cloud.gcp_compute_forwarding_rule_info
+ gcp_compute_global_address_facts:
+ redirect: google.cloud.gcp_compute_global_address_info
+ gcp_compute_global_forwarding_rule_facts:
+ redirect: google.cloud.gcp_compute_global_forwarding_rule_info
+ gcp_compute_health_check_facts:
+ redirect: google.cloud.gcp_compute_health_check_info
+ gcp_compute_http_health_check_facts:
+ redirect: google.cloud.gcp_compute_http_health_check_info
+ gcp_compute_https_health_check_facts:
+ redirect: google.cloud.gcp_compute_https_health_check_info
+ gcp_compute_image_facts:
+ redirect: google.cloud.gcp_compute_image_info
+ gcp_compute_instance_facts:
+ redirect: google.cloud.gcp_compute_instance_info
+ gcp_compute_instance_group_facts:
+ redirect: google.cloud.gcp_compute_instance_group_info
+ gcp_compute_instance_group_manager_facts:
+ redirect: google.cloud.gcp_compute_instance_group_manager_info
+ gcp_compute_instance_template_facts:
+ redirect: google.cloud.gcp_compute_instance_template_info
+ gcp_compute_interconnect_attachment_facts:
+ redirect: google.cloud.gcp_compute_interconnect_attachment_info
+ gcp_compute_network_facts:
+ redirect: google.cloud.gcp_compute_network_info
+ gcp_compute_region_disk_facts:
+ redirect: google.cloud.gcp_compute_region_disk_info
+ gcp_compute_route_facts:
+ redirect: google.cloud.gcp_compute_route_info
+ gcp_compute_router_facts:
+ redirect: google.cloud.gcp_compute_router_info
+ gcp_compute_ssl_certificate_facts:
+ redirect: google.cloud.gcp_compute_ssl_certificate_info
+ gcp_compute_ssl_policy_facts:
+ redirect: google.cloud.gcp_compute_ssl_policy_info
+ gcp_compute_subnetwork_facts:
+ redirect: google.cloud.gcp_compute_subnetwork_info
+ gcp_compute_target_http_proxy_facts:
+ redirect: google.cloud.gcp_compute_target_http_proxy_info
+ gcp_compute_target_https_proxy_facts:
+ redirect: google.cloud.gcp_compute_target_https_proxy_info
+ gcp_compute_target_pool_facts:
+ redirect: google.cloud.gcp_compute_target_pool_info
+ gcp_compute_target_ssl_proxy_facts:
+ redirect: google.cloud.gcp_compute_target_ssl_proxy_info
+ gcp_compute_target_tcp_proxy_facts:
+ redirect: google.cloud.gcp_compute_target_tcp_proxy_info
+ gcp_compute_target_vpn_gateway_facts:
+ redirect: google.cloud.gcp_compute_target_vpn_gateway_info
+ gcp_compute_url_map_facts:
+ redirect: google.cloud.gcp_compute_url_map_info
+ gcp_compute_vpn_tunnel_facts:
+ redirect: google.cloud.gcp_compute_vpn_tunnel_info
+ gcp_container_cluster_facts:
+ redirect: google.cloud.gcp_container_cluster_info
+ gcp_container_node_pool_facts:
+ redirect: google.cloud.gcp_container_node_pool_info
+ gcp_dns_managed_zone_facts:
+ redirect: google.cloud.gcp_dns_managed_zone_info
+ gcp_dns_resource_record_set_facts:
+ redirect: google.cloud.gcp_dns_resource_record_set_info
+ gcp_forwarding_rule:
+ redirect: community.general.gcp_forwarding_rule
+ gcp_healthcheck:
+ redirect: community.general.gcp_healthcheck
+ gcp_iam_role_facts:
+ redirect: google.cloud.gcp_iam_role_info
+ gcp_iam_service_account_facts:
+ redirect: google.cloud.gcp_iam_service_account_info
+ gcp_pubsub_subscription_facts:
+ redirect: google.cloud.gcp_pubsub_subscription_info
+ gcp_pubsub_topic_facts:
+ redirect: google.cloud.gcp_pubsub_topic_info
+ gcp_redis_instance_facts:
+ redirect: google.cloud.gcp_redis_instance_info
+ gcp_resourcemanager_project_facts:
+ redirect: google.cloud.gcp_resourcemanager_project_info
+ gcp_sourcerepo_repository_facts:
+ redirect: google.cloud.gcp_sourcerepo_repository_info
+ gcp_spanner_database_facts:
+ redirect: google.cloud.gcp_spanner_database_info
+ gcp_spanner_instance_facts:
+ redirect: google.cloud.gcp_spanner_instance_info
+ gcp_sql_database_facts:
+ redirect: google.cloud.gcp_sql_database_info
+ gcp_sql_instance_facts:
+ redirect: google.cloud.gcp_sql_instance_info
+ gcp_sql_user_facts:
+ redirect: google.cloud.gcp_sql_user_info
+ gcp_target_proxy:
+ redirect: community.general.gcp_target_proxy
+ gcp_tpu_node_facts:
+ redirect: google.cloud.gcp_tpu_node_info
+ gcp_url_map:
+ redirect: community.general.gcp_url_map
+ gcpubsub_facts:
+ redirect: community.general.gcpubsub_facts
+ gcspanner:
+ redirect: community.general.gcspanner
+ gc_storage:
+ redirect: community.google.gc_storage
+ gce_eip:
+ redirect: community.google.gce_eip
+ gce_img:
+ redirect: community.google.gce_img
+ gce_instance_template:
+ redirect: community.google.gce_instance_template
+ gce_labels:
+ redirect: community.google.gce_labels
+ gce_lb:
+ redirect: community.google.gce_lb
+ gce_mig:
+ redirect: community.google.gce_mig
+ gce_net:
+ redirect: community.google.gce_net
+ gce_pd:
+ redirect: community.google.gce_pd
+ gce_snapshot:
+ redirect: community.google.gce_snapshot
+ gce_tag:
+ redirect: community.google.gce_tag
+ gcpubsub:
+ redirect: community.google.gcpubsub
+ gcpubsub_info:
+ redirect: community.google.gcpubsub_info
+ heroku_collaborator:
+ redirect: community.general.heroku_collaborator
+ hwc_ecs_instance:
+ redirect: community.general.hwc_ecs_instance
+ hwc_evs_disk:
+ redirect: community.general.hwc_evs_disk
+ hwc_network_vpc:
+ redirect: community.general.hwc_network_vpc
+ hwc_smn_topic:
+ redirect: community.general.hwc_smn_topic
+ hwc_vpc_eip:
+ redirect: community.general.hwc_vpc_eip
+ hwc_vpc_peering_connect:
+ redirect: community.general.hwc_vpc_peering_connect
+ hwc_vpc_port:
+ redirect: community.general.hwc_vpc_port
+ hwc_vpc_private_ip:
+ redirect: community.general.hwc_vpc_private_ip
+ hwc_vpc_route:
+ redirect: community.general.hwc_vpc_route
+ hwc_vpc_security_group:
+ redirect: community.general.hwc_vpc_security_group
+ hwc_vpc_security_group_rule:
+ redirect: community.general.hwc_vpc_security_group_rule
+ hwc_vpc_subnet:
+ redirect: community.general.hwc_vpc_subnet
+ kubevirt_cdi_upload:
+ redirect: community.kubevirt.kubevirt_cdi_upload
+ kubevirt_preset:
+ redirect: community.kubevirt.kubevirt_preset
+ kubevirt_pvc:
+ redirect: community.kubevirt.kubevirt_pvc
+ kubevirt_rs:
+ redirect: community.kubevirt.kubevirt_rs
+ kubevirt_template:
+ redirect: community.kubevirt.kubevirt_template
+ kubevirt_vm:
+ redirect: community.kubevirt.kubevirt_vm
+ linode:
+ redirect: community.general.linode
+ linode_v4:
+ redirect: community.general.linode_v4
+ lxc_container:
+ redirect: community.general.lxc_container
+ lxd_container:
+ redirect: community.general.lxd_container
+ lxd_profile:
+ redirect: community.general.lxd_profile
+ memset_memstore_facts:
+ redirect: community.general.memset_memstore_facts
+ memset_server_facts:
+ redirect: community.general.memset_server_facts
+ memset_dns_reload:
+ redirect: community.general.memset_dns_reload
+ memset_memstore_info:
+ redirect: community.general.memset_memstore_info
+ memset_server_info:
+ redirect: community.general.memset_server_info
+ memset_zone:
+ redirect: community.general.memset_zone
+ memset_zone_domain:
+ redirect: community.general.memset_zone_domain
+ memset_zone_record:
+ redirect: community.general.memset_zone_record
+ cloud_init_data_facts:
+ redirect: community.general.cloud_init_data_facts
+ helm:
+ redirect: community.general.helm
+ ovirt:
+ redirect: community.general.ovirt
+ proxmox:
+ redirect: community.general.proxmox
+ proxmox_kvm:
+ redirect: community.general.proxmox_kvm
+ proxmox_template:
+ redirect: community.general.proxmox_template
+ rhevm:
+ redirect: community.general.rhevm
+ serverless:
+ redirect: community.general.serverless
+ terraform:
+ redirect: community.general.terraform
+ virt:
+ redirect: community.libvirt.virt
+ virt_net:
+ redirect: community.libvirt.virt_net
+ virt_pool:
+ redirect: community.libvirt.virt_pool
+ xenserver_facts:
+ redirect: community.general.xenserver_facts
+ oneandone_firewall_policy:
+ redirect: community.general.oneandone_firewall_policy
+ oneandone_load_balancer:
+ redirect: community.general.oneandone_load_balancer
+ oneandone_monitoring_policy:
+ redirect: community.general.oneandone_monitoring_policy
+ oneandone_private_network:
+ redirect: community.general.oneandone_private_network
+ oneandone_public_ip:
+ redirect: community.general.oneandone_public_ip
+ oneandone_server:
+ redirect: community.general.oneandone_server
+ online_server_facts:
+ redirect: community.general.online_server_facts
+ online_user_facts:
+ redirect: community.general.online_user_facts
+ online_server_info:
+ redirect: community.general.online_server_info
+ online_user_info:
+ redirect: community.general.online_user_info
+ one_image_facts:
+ redirect: community.general.one_image_facts
+ one_host:
+ redirect: community.general.one_host
+ one_image:
+ redirect: community.general.one_image
+ one_image_info:
+ redirect: community.general.one_image_info
+ one_service:
+ redirect: community.general.one_service
+ one_vm:
+ redirect: community.general.one_vm
+ os_flavor_facts:
+ redirect: openstack.cloud.os_flavor_info
+ os_image_facts:
+ redirect: openstack.cloud.os_image_info
+ os_keystone_domain_facts:
+ redirect: openstack.cloud.os_keystone_domain_info
+ os_networks_facts:
+ redirect: openstack.cloud.os_networks_info
+ os_port_facts:
+ redirect: openstack.cloud.os_port_info
+ os_project_facts:
+ redirect: openstack.cloud.os_project_info
+ os_server_facts:
+ redirect: openstack.cloud.os_server_info
+ os_subnets_facts:
+ redirect: openstack.cloud.os_subnets_info
+ os_user_facts:
+ redirect: openstack.cloud.os_user_info
+ oci_vcn:
+ redirect: community.general.oci_vcn
+ ovh_ip_failover:
+ redirect: community.general.ovh_ip_failover
+ ovh_ip_loadbalancing_backend:
+ redirect: community.general.ovh_ip_loadbalancing_backend
+ ovh_monthly_billing:
+ redirect: community.general.ovh_monthly_billing
+ ovirt_affinity_label_facts:
+ redirect: community.general.ovirt_affinity_label_facts
+ ovirt_api_facts:
+ redirect: community.general.ovirt_api_facts
+ ovirt_cluster_facts:
+ redirect: community.general.ovirt_cluster_facts
+ ovirt_datacenter_facts:
+ redirect: community.general.ovirt_datacenter_facts
+ ovirt_disk_facts:
+ redirect: community.general.ovirt_disk_facts
+ ovirt_event_facts:
+ redirect: community.general.ovirt_event_facts
+ ovirt_external_provider_facts:
+ redirect: community.general.ovirt_external_provider_facts
+ ovirt_group_facts:
+ redirect: community.general.ovirt_group_facts
+ ovirt_host_facts:
+ redirect: community.general.ovirt_host_facts
+ ovirt_host_storage_facts:
+ redirect: community.general.ovirt_host_storage_facts
+ ovirt_network_facts:
+ redirect: community.general.ovirt_network_facts
+ ovirt_nic_facts:
+ redirect: community.general.ovirt_nic_facts
+ ovirt_permission_facts:
+ redirect: community.general.ovirt_permission_facts
+ ovirt_quota_facts:
+ redirect: community.general.ovirt_quota_facts
+ ovirt_scheduling_policy_facts:
+ redirect: community.general.ovirt_scheduling_policy_facts
+ ovirt_snapshot_facts:
+ redirect: community.general.ovirt_snapshot_facts
+ ovirt_storage_domain_facts:
+ redirect: community.general.ovirt_storage_domain_facts
+ ovirt_storage_template_facts:
+ redirect: community.general.ovirt_storage_template_facts
+ ovirt_storage_vm_facts:
+ redirect: community.general.ovirt_storage_vm_facts
+ ovirt_tag_facts:
+ redirect: community.general.ovirt_tag_facts
+ ovirt_template_facts:
+ redirect: community.general.ovirt_template_facts
+ ovirt_user_facts:
+ redirect: community.general.ovirt_user_facts
+ ovirt_vm_facts:
+ redirect: community.general.ovirt_vm_facts
+ ovirt_vmpool_facts:
+ redirect: community.general.ovirt_vmpool_facts
+ packet_device:
+ redirect: community.general.packet_device
+ packet_ip_subnet:
+ redirect: community.general.packet_ip_subnet
+ packet_project:
+ redirect: community.general.packet_project
+ packet_sshkey:
+ redirect: community.general.packet_sshkey
+ packet_volume:
+ redirect: community.general.packet_volume
+ packet_volume_attachment:
+ redirect: community.general.packet_volume_attachment
+ profitbricks:
+ redirect: community.general.profitbricks
+ profitbricks_datacenter:
+ redirect: community.general.profitbricks_datacenter
+ profitbricks_nic:
+ redirect: community.general.profitbricks_nic
+ profitbricks_volume:
+ redirect: community.general.profitbricks_volume
+ profitbricks_volume_attachments:
+ redirect: community.general.profitbricks_volume_attachments
+ pubnub_blocks:
+ redirect: community.general.pubnub_blocks
+ rax:
+ redirect: community.general.rax
+ rax_cbs:
+ redirect: community.general.rax_cbs
+ rax_cbs_attachments:
+ redirect: community.general.rax_cbs_attachments
+ rax_cdb:
+ redirect: community.general.rax_cdb
+ rax_cdb_database:
+ redirect: community.general.rax_cdb_database
+ rax_cdb_user:
+ redirect: community.general.rax_cdb_user
+ rax_clb:
+ redirect: community.general.rax_clb
+ rax_clb_nodes:
+ redirect: community.general.rax_clb_nodes
+ rax_clb_ssl:
+ redirect: community.general.rax_clb_ssl
+ rax_dns:
+ redirect: community.general.rax_dns
+ rax_dns_record:
+ redirect: community.general.rax_dns_record
+ rax_facts:
+ redirect: community.general.rax_facts
+ rax_files:
+ redirect: community.general.rax_files
+ rax_files_objects:
+ redirect: community.general.rax_files_objects
+ rax_identity:
+ redirect: community.general.rax_identity
+ rax_keypair:
+ redirect: community.general.rax_keypair
+ rax_meta:
+ redirect: community.general.rax_meta
+ rax_mon_alarm:
+ redirect: community.general.rax_mon_alarm
+ rax_mon_check:
+ redirect: community.general.rax_mon_check
+ rax_mon_entity:
+ redirect: community.general.rax_mon_entity
+ rax_mon_notification:
+ redirect: community.general.rax_mon_notification
+ rax_mon_notification_plan:
+ redirect: community.general.rax_mon_notification_plan
+ rax_network:
+ redirect: community.general.rax_network
+ rax_queue:
+ redirect: community.general.rax_queue
+ rax_scaling_group:
+ redirect: community.general.rax_scaling_group
+ rax_scaling_policy:
+ redirect: community.general.rax_scaling_policy
+ scaleway_image_facts:
+ redirect: community.general.scaleway_image_facts
+ scaleway_ip_facts:
+ redirect: community.general.scaleway_ip_facts
+ scaleway_organization_facts:
+ redirect: community.general.scaleway_organization_facts
+ scaleway_security_group_facts:
+ redirect: community.general.scaleway_security_group_facts
+ scaleway_server_facts:
+ redirect: community.general.scaleway_server_facts
+ scaleway_snapshot_facts:
+ redirect: community.general.scaleway_snapshot_facts
+ scaleway_volume_facts:
+ redirect: community.general.scaleway_volume_facts
+ scaleway_compute:
+ redirect: community.general.scaleway_compute
+ scaleway_image_info:
+ redirect: community.general.scaleway_image_info
+ scaleway_ip:
+ redirect: community.general.scaleway_ip
+ scaleway_ip_info:
+ redirect: community.general.scaleway_ip_info
+ scaleway_lb:
+ redirect: community.general.scaleway_lb
+ scaleway_organization_info:
+ redirect: community.general.scaleway_organization_info
+ scaleway_security_group:
+ redirect: community.general.scaleway_security_group
+ scaleway_security_group_info:
+ redirect: community.general.scaleway_security_group_info
+ scaleway_security_group_rule:
+ redirect: community.general.scaleway_security_group_rule
+ scaleway_server_info:
+ redirect: community.general.scaleway_server_info
+ scaleway_snapshot_info:
+ redirect: community.general.scaleway_snapshot_info
+ scaleway_sshkey:
+ redirect: community.general.scaleway_sshkey
+ scaleway_user_data:
+ redirect: community.general.scaleway_user_data
+ scaleway_volume:
+ redirect: community.general.scaleway_volume
+ scaleway_volume_info:
+ redirect: community.general.scaleway_volume_info
+ smartos_image_facts:
+ redirect: community.general.smartos_image_facts
+ imgadm:
+ redirect: community.general.imgadm
+ nictagadm:
+ redirect: community.general.nictagadm
+ smartos_image_info:
+ redirect: community.general.smartos_image_info
+ vmadm:
+ redirect: community.general.vmadm
+ sl_vm:
+ redirect: community.general.sl_vm
+ spotinst_aws_elastigroup:
+ redirect: community.general.spotinst_aws_elastigroup
+ udm_dns_record:
+ redirect: community.general.udm_dns_record
+ udm_dns_zone:
+ redirect: community.general.udm_dns_zone
+ udm_group:
+ redirect: community.general.udm_group
+ udm_share:
+ redirect: community.general.udm_share
+ udm_user:
+ redirect: community.general.udm_user
+ vr_account_facts:
+ redirect: ngine_io.vultr.vultr_account_facts
+ vr_dns_domain:
+ redirect: ngine_io.vultr.vultr_dns_domain
+ vr_dns_record:
+ redirect: ngine_io.vultr.vultr_dns_record
+ vr_firewall_group:
+ redirect: ngine_io.vultr.vultr_firewall_group
+ vr_firewall_rule:
+ redirect: ngine_io.vultr.vultr_firewall_rule
+ vr_server:
+ redirect: ngine_io.vultr.vultr_server
+ vr_ssh_key:
+ redirect: ngine_io.vultr.vultr_ssh_key
+ vr_startup_script:
+ redirect: ngine_io.vultr.vultr_startup_script
+ vr_user:
+ redirect: ngine_io.vultr.vultr_user
+ vultr_account_facts:
+ redirect: ngine_io.vultr.vultr_account_info
+ vultr_block_storage_facts:
+ redirect: ngine_io.vultr.vultr_block_storage_info
+ vultr_dns_domain_facts:
+ redirect: ngine_io.vultr.vultr_dns_domain_info
+ vultr_firewall_group_facts:
+ redirect: ngine_io.vultr.vultr_firewall_group_info
+ vultr_network_facts:
+ redirect: ngine_io.vultr.vultr_network_info
+ vultr_os_facts:
+ redirect: ngine_io.vultr.vultr_os_info
+ vultr_plan_facts:
+ redirect: ngine_io.vultr.vultr_plan_info
+ vultr_region_facts:
+ redirect: ngine_io.vultr.vultr_region_info
+ vultr_server_facts:
+ redirect: ngine_io.vultr.vultr_server_info
+ vultr_ssh_key_facts:
+ redirect: ngine_io.vultr.vultr_ssh_key_info
+ vultr_startup_script_facts:
+ redirect: ngine_io.vultr.vultr_startup_script_info
+ vultr_user_facts:
+ redirect: ngine_io.vultr.vultr_user_info
+ vultr_account_info:
+ redirect: ngine_io.vultr.vultr_account_info
+ vultr_block_storage:
+ redirect: ngine_io.vultr.vultr_block_storage
+ vultr_block_storage_info:
+ redirect: ngine_io.vultr.vultr_block_storage_info
+ vultr_dns_domain:
+ redirect: ngine_io.vultr.vultr_dns_domain
+ vultr_dns_domain_info:
+ redirect: ngine_io.vultr.vultr_dns_domain_info
+ vultr_dns_record:
+ redirect: ngine_io.vultr.vultr_dns_record
+ vultr_firewall_group:
+ redirect: ngine_io.vultr.vultr_firewall_group
+ vultr_firewall_group_info:
+ redirect: ngine_io.vultr.vultr_firewall_group_info
+ vultr_firewall_rule:
+ redirect: ngine_io.vultr.vultr_firewall_rule
+ vultr_network:
+ redirect: ngine_io.vultr.vultr_network
+ vultr_network_info:
+ redirect: ngine_io.vultr.vultr_network_info
+ vultr_os_info:
+ redirect: ngine_io.vultr.vultr_os_info
+ vultr_plan_info:
+ redirect: ngine_io.vultr.vultr_plan_info
+ vultr_region_info:
+ redirect: ngine_io.vultr.vultr_region_info
+ vultr_server:
+ redirect: ngine_io.vultr.vultr_server
+ vultr_server_info:
+ redirect: ngine_io.vultr.vultr_server_info
+ vultr_ssh_key:
+ redirect: ngine_io.vultr.vultr_ssh_key
+ vultr_ssh_key_info:
+ redirect: ngine_io.vultr.vultr_ssh_key_info
+ vultr_startup_script:
+ redirect: ngine_io.vultr.vultr_startup_script
+ vultr_startup_script_info:
+ redirect: ngine_io.vultr.vultr_startup_script_info
+ vultr_user:
+ redirect: ngine_io.vultr.vultr_user
+ vultr_user_info:
+ redirect: ngine_io.vultr.vultr_user_info
+ webfaction_app:
+ redirect: community.general.webfaction_app
+ webfaction_db:
+ redirect: community.general.webfaction_db
+ webfaction_domain:
+ redirect: community.general.webfaction_domain
+ webfaction_mailbox:
+ redirect: community.general.webfaction_mailbox
+ webfaction_site:
+ redirect: community.general.webfaction_site
+ xenserver_guest_facts:
+ redirect: community.general.xenserver_guest_facts
+ xenserver_guest:
+ redirect: community.general.xenserver_guest
+ xenserver_guest_info:
+ redirect: community.general.xenserver_guest_info
+ xenserver_guest_powerstate:
+ redirect: community.general.xenserver_guest_powerstate
+ consul:
+ redirect: community.general.consul
+ consul_acl:
+ redirect: community.general.consul_acl
+ consul_kv:
+ redirect: community.general.consul_kv
+ consul_session:
+ redirect: community.general.consul_session
+ etcd3:
+ redirect: community.general.etcd3
+ pacemaker_cluster:
+ redirect: community.general.pacemaker_cluster
+ znode:
+ redirect: community.general.znode
+ aerospike_migrations:
+ redirect: community.general.aerospike_migrations
+ influxdb_database:
+ redirect: community.general.influxdb_database
+ influxdb_query:
+ redirect: community.general.influxdb_query
+ influxdb_retention_policy:
+ redirect: community.general.influxdb_retention_policy
+ influxdb_user:
+ redirect: community.general.influxdb_user
+ influxdb_write:
+ redirect: community.general.influxdb_write
+ elasticsearch_plugin:
+ redirect: community.general.elasticsearch_plugin
+ kibana_plugin:
+ redirect: community.general.kibana_plugin
+ redis:
+ redirect: community.general.redis
+ riak:
+ redirect: community.general.riak
+ mssql_db:
+ redirect: community.general.mssql_db
+ mysql_db:
+ redirect: community.mysql.mysql_db
+ mysql_info:
+ redirect: community.mysql.mysql_info
+ mysql_query:
+ redirect: community.mysql.mysql_query
+ mysql_replication:
+ redirect: community.mysql.mysql_replication
+ mysql_user:
+ redirect: community.mysql.mysql_user
+ mysql_variables:
+ redirect: community.mysql.mysql_variables
+ postgresql_copy:
+ redirect: community.postgresql.postgresql_copy
+ postgresql_db:
+ redirect: community.postgresql.postgresql_db
+ postgresql_ext:
+ redirect: community.postgresql.postgresql_ext
+ postgresql_idx:
+ redirect: community.postgresql.postgresql_idx
+ postgresql_info:
+ redirect: community.postgresql.postgresql_info
+ postgresql_lang:
+ redirect: community.postgresql.postgresql_lang
+ postgresql_membership:
+ redirect: community.postgresql.postgresql_membership
+ postgresql_owner:
+ redirect: community.postgresql.postgresql_owner
+ postgresql_pg_hba:
+ redirect: community.postgresql.postgresql_pg_hba
+ postgresql_ping:
+ redirect: community.postgresql.postgresql_ping
+ postgresql_privs:
+ redirect: community.postgresql.postgresql_privs
+ postgresql_publication:
+ redirect: community.postgresql.postgresql_publication
+ postgresql_query:
+ redirect: community.postgresql.postgresql_query
+ postgresql_schema:
+ redirect: community.postgresql.postgresql_schema
+ postgresql_sequence:
+ redirect: community.postgresql.postgresql_sequence
+ postgresql_set:
+ redirect: community.postgresql.postgresql_set
+ postgresql_slot:
+ redirect: community.postgresql.postgresql_slot
+ postgresql_subscription:
+ redirect: community.postgresql.postgresql_subscription
+ postgresql_table:
+ redirect: community.postgresql.postgresql_table
+ postgresql_tablespace:
+ redirect: community.postgresql.postgresql_tablespace
+ postgresql_user:
+ redirect: community.postgresql.postgresql_user
+ postgresql_user_obj_stat_info:
+ redirect: community.postgresql.postgresql_user_obj_stat_info
+ proxysql_backend_servers:
+ redirect: community.proxysql.proxysql_backend_servers
+ proxysql_global_variables:
+ redirect: community.proxysql.proxysql_global_variables
+ proxysql_manage_config:
+ redirect: community.proxysql.proxysql_manage_config
+ proxysql_mysql_users:
+ redirect: community.proxysql.proxysql_mysql_users
+ proxysql_query_rules:
+ redirect: community.proxysql.proxysql_query_rules
+ proxysql_replication_hostgroups:
+ redirect: community.proxysql.proxysql_replication_hostgroups
+ proxysql_scheduler:
+ redirect: community.proxysql.proxysql_scheduler
+ vertica_facts:
+ redirect: community.general.vertica_facts
+ vertica_configuration:
+ redirect: community.general.vertica_configuration
+ vertica_info:
+ redirect: community.general.vertica_info
+ vertica_role:
+ redirect: community.general.vertica_role
+ vertica_schema:
+ redirect: community.general.vertica_schema
+ vertica_user:
+ redirect: community.general.vertica_user
+ archive:
+ redirect: community.general.archive
+ ini_file:
+ redirect: community.general.ini_file
+ iso_extract:
+ redirect: community.general.iso_extract
+ patch:
+ redirect: ansible.posix.patch
+ read_csv:
+ redirect: community.general.read_csv
+ xattr:
+ redirect: community.general.xattr
+ xml:
+ redirect: community.general.xml
+ onepassword_facts:
+ redirect: community.general.onepassword_facts
+ ipa_config:
+ redirect: community.general.ipa_config
+ ipa_dnsrecord:
+ redirect: community.general.ipa_dnsrecord
+ ipa_dnszone:
+ redirect: community.general.ipa_dnszone
+ ipa_group:
+ redirect: community.general.ipa_group
+ ipa_hbacrule:
+ redirect: community.general.ipa_hbacrule
+ ipa_host:
+ redirect: community.general.ipa_host
+ ipa_hostgroup:
+ redirect: community.general.ipa_hostgroup
+ ipa_role:
+ redirect: community.general.ipa_role
+ ipa_service:
+ redirect: community.general.ipa_service
+ ipa_subca:
+ redirect: community.general.ipa_subca
+ ipa_sudocmd:
+ redirect: community.general.ipa_sudocmd
+ ipa_sudocmdgroup:
+ redirect: community.general.ipa_sudocmdgroup
+ ipa_sudorule:
+ redirect: community.general.ipa_sudorule
+ ipa_user:
+ redirect: community.general.ipa_user
+ ipa_vault:
+ redirect: community.general.ipa_vault
+ keycloak_client:
+ redirect: community.general.keycloak_client
+ keycloak_clienttemplate:
+ redirect: community.general.keycloak_clienttemplate
+ keycloak_group:
+ redirect: community.general.keycloak_group
+ onepassword_info:
+ redirect: community.general.onepassword_info
+ opendj_backendprop:
+ redirect: community.general.opendj_backendprop
+ rabbitmq_binding:
+ redirect: community.rabbitmq.rabbitmq_binding
+ rabbitmq_exchange:
+ redirect: community.rabbitmq.rabbitmq_exchange
+ rabbitmq_global_parameter:
+ redirect: community.rabbitmq.rabbitmq_global_parameter
+ rabbitmq_parameter:
+ redirect: community.rabbitmq.rabbitmq_parameter
+ rabbitmq_plugin:
+ redirect: community.rabbitmq.rabbitmq_plugin
+ rabbitmq_policy:
+ redirect: community.rabbitmq.rabbitmq_policy
+ rabbitmq_queue:
+ redirect: community.rabbitmq.rabbitmq_queue
+ rabbitmq_user:
+ redirect: community.rabbitmq.rabbitmq_user
+ rabbitmq_vhost:
+ redirect: community.rabbitmq.rabbitmq_vhost
+ rabbitmq_vhost_limits:
+ redirect: community.rabbitmq.rabbitmq_vhost_limits
+ airbrake_deployment:
+ redirect: community.general.airbrake_deployment
+ bigpanda:
+ redirect: community.general.bigpanda
+ circonus_annotation:
+ redirect: community.general.circonus_annotation
+ datadog_event:
+ redirect: community.general.datadog_event
+ datadog_monitor:
+ redirect: community.general.datadog_monitor
+ honeybadger_deployment:
+ redirect: community.general.honeybadger_deployment
+ icinga2_feature:
+ redirect: community.general.icinga2_feature
+ icinga2_host:
+ redirect: community.general.icinga2_host
+ librato_annotation:
+ redirect: community.general.librato_annotation
+ logentries:
+ redirect: community.general.logentries
+ logicmonitor:
+ redirect: community.general.logicmonitor
+ logicmonitor_facts:
+ redirect: community.general.logicmonitor_facts
+ logstash_plugin:
+ redirect: community.general.logstash_plugin
+ monit:
+ redirect: community.general.monit
+ nagios:
+ redirect: community.general.nagios
+ newrelic_deployment:
+ redirect: community.general.newrelic_deployment
+ pagerduty:
+ redirect: community.general.pagerduty
+ pagerduty_alert:
+ redirect: community.general.pagerduty_alert
+ pingdom:
+ redirect: community.general.pingdom
+ rollbar_deployment:
+ redirect: community.general.rollbar_deployment
+ sensu_check:
+ redirect: community.general.sensu_check
+ sensu_client:
+ redirect: community.general.sensu_client
+ sensu_handler:
+ redirect: community.general.sensu_handler
+ sensu_silence:
+ redirect: community.general.sensu_silence
+ sensu_subscription:
+ redirect: community.general.sensu_subscription
+ spectrum_device:
+ redirect: community.general.spectrum_device
+ stackdriver:
+ redirect: community.general.stackdriver
+ statusio_maintenance:
+ redirect: community.general.statusio_maintenance
+ uptimerobot:
+ redirect: community.general.uptimerobot
+ zabbix_group_facts:
+ redirect: community.zabbix.zabbix_group_facts
+ zabbix_host_facts:
+ redirect: community.zabbix.zabbix_host_facts
+ zabbix_action:
+ redirect: community.zabbix.zabbix_action
+ zabbix_group:
+ redirect: community.zabbix.zabbix_group
+ zabbix_group_info:
+ redirect: community.zabbix.zabbix_group_info
+ zabbix_host:
+ redirect: community.zabbix.zabbix_host
+ zabbix_host_events_info:
+ redirect: community.zabbix.zabbix_host_events_info
+ zabbix_host_info:
+ redirect: community.zabbix.zabbix_host_info
+ zabbix_hostmacro:
+ redirect: community.zabbix.zabbix_hostmacro
+ zabbix_maintenance:
+ redirect: community.zabbix.zabbix_maintenance
+ zabbix_map:
+ redirect: community.zabbix.zabbix_map
+ zabbix_mediatype:
+ redirect: community.zabbix.zabbix_mediatype
+ zabbix_proxy:
+ redirect: community.zabbix.zabbix_proxy
+ zabbix_screen:
+ redirect: community.zabbix.zabbix_screen
+ zabbix_service:
+ redirect: community.zabbix.zabbix_service
+ zabbix_template:
+ redirect: community.zabbix.zabbix_template
+ zabbix_template_info:
+ redirect: community.zabbix.zabbix_template_info
+ zabbix_user:
+ redirect: community.zabbix.zabbix_user
+ zabbix_user_info:
+ redirect: community.zabbix.zabbix_user_info
+ zabbix_valuemap:
+ redirect: community.zabbix.zabbix_valuemap
+ cloudflare_dns:
+ redirect: community.general.cloudflare_dns
+ dnsimple:
+ redirect: community.general.dnsimple
+ dnsmadeeasy:
+ redirect: community.general.dnsmadeeasy
+ exo_dns_domain:
+ redirect: ngine_io.exoscale.exo_dns_domain
+ exo_dns_record:
+ redirect: ngine_io.exoscale.exo_dns_record
+ haproxy:
+ redirect: community.general.haproxy
+ hetzner_failover_ip:
+ redirect: community.hrobot.failover_ip
+ hetzner_failover_ip_info:
+ redirect: community.hrobot.failover_ip_info
+ hetzner_firewall:
+ redirect: community.hrobot.firewall
+ hetzner_firewall_info:
+ redirect: community.hrobot.firewall_info
+ infinity:
+ redirect: community.general.infinity
+ ip_netns:
+ redirect: community.general.ip_netns
+ ipify_facts:
+ redirect: community.general.ipify_facts
+ ipinfoio_facts:
+ redirect: community.general.ipinfoio_facts
+ ipwcli_dns:
+ redirect: community.general.ipwcli_dns
+ ldap_attr:
+ redirect: community.general.ldap_attr
+ ldap_attrs:
+ redirect: community.general.ldap_attrs
+ ldap_entry:
+ redirect: community.general.ldap_entry
+ ldap_passwd:
+ redirect: community.general.ldap_passwd
+ lldp:
+ redirect: community.general.lldp
+ netcup_dns:
+ redirect: community.general.netcup_dns
+ nios_a_record:
+ redirect: community.general.nios_a_record
+ nios_aaaa_record:
+ redirect: community.general.nios_aaaa_record
+ nios_cname_record:
+ redirect: community.general.nios_cname_record
+ nios_dns_view:
+ redirect: community.general.nios_dns_view
+ nios_fixed_address:
+ redirect: community.general.nios_fixed_address
+ nios_host_record:
+ redirect: community.general.nios_host_record
+ nios_member:
+ redirect: community.general.nios_member
+ nios_mx_record:
+ redirect: community.general.nios_mx_record
+ nios_naptr_record:
+ redirect: community.general.nios_naptr_record
+ nios_network:
+ redirect: community.general.nios_network
+ nios_network_view:
+ redirect: community.general.nios_network_view
+ nios_nsgroup:
+ redirect: community.general.nios_nsgroup
+ nios_ptr_record:
+ redirect: community.general.nios_ptr_record
+ nios_srv_record:
+ redirect: community.general.nios_srv_record
+ nios_txt_record:
+ redirect: community.general.nios_txt_record
+ nios_zone:
+ redirect: community.general.nios_zone
+ nmcli:
+ redirect: community.general.nmcli
+ nsupdate:
+ redirect: community.general.nsupdate
+ omapi_host:
+ redirect: community.general.omapi_host
+ snmp_facts:
+ redirect: community.general.snmp_facts
+ a10_server:
+ redirect: community.network.a10_server
+ a10_server_axapi3:
+ redirect: community.network.a10_server_axapi3
+ a10_service_group:
+ redirect: community.network.a10_service_group
+ a10_virtual_server:
+ redirect: community.network.a10_virtual_server
+ aci_intf_policy_fc:
+ redirect: cisco.aci.aci_interface_policy_fc
+ aci_intf_policy_l2:
+ redirect: cisco.aci.aci_interface_policy_l2
+ aci_intf_policy_lldp:
+ redirect: cisco.aci.aci_interface_policy_lldp
+ aci_intf_policy_mcp:
+ redirect: cisco.aci.aci_interface_policy_mcp
+ aci_intf_policy_port_channel:
+ redirect: cisco.aci.aci_interface_policy_port_channel
+ aci_intf_policy_port_security:
+ redirect: cisco.aci.aci_interface_policy_port_security
+ mso_schema_template_external_epg_contract:
+ redirect: cisco.mso.mso_schema_template_external_epg_contract
+ mso_schema_template_external_epg_subnet:
+ redirect: cisco.mso.mso_schema_template_external_epg_subnet
+ aireos_command:
+ redirect: community.network.aireos_command
+ aireos_config:
+ redirect: community.network.aireos_config
+ apconos_command:
+ redirect: community.network.apconos_command
+ aruba_command:
+ redirect: community.network.aruba_command
+ aruba_config:
+ redirect: community.network.aruba_config
+ avi_actiongroupconfig:
+ redirect: community.network.avi_actiongroupconfig
+ avi_alertconfig:
+ redirect: community.network.avi_alertconfig
+ avi_alertemailconfig:
+ redirect: community.network.avi_alertemailconfig
+ avi_alertscriptconfig:
+ redirect: community.network.avi_alertscriptconfig
+ avi_alertsyslogconfig:
+ redirect: community.network.avi_alertsyslogconfig
+ avi_analyticsprofile:
+ redirect: community.network.avi_analyticsprofile
+ avi_api_session:
+ redirect: community.network.avi_api_session
+ avi_api_version:
+ redirect: community.network.avi_api_version
+ avi_applicationpersistenceprofile:
+ redirect: community.network.avi_applicationpersistenceprofile
+ avi_applicationprofile:
+ redirect: community.network.avi_applicationprofile
+ avi_authprofile:
+ redirect: community.network.avi_authprofile
+ avi_autoscalelaunchconfig:
+ redirect: community.network.avi_autoscalelaunchconfig
+ avi_backup:
+ redirect: community.network.avi_backup
+ avi_backupconfiguration:
+ redirect: community.network.avi_backupconfiguration
+ avi_certificatemanagementprofile:
+ redirect: community.network.avi_certificatemanagementprofile
+ avi_cloud:
+ redirect: community.network.avi_cloud
+ avi_cloudconnectoruser:
+ redirect: community.network.avi_cloudconnectoruser
+ avi_cloudproperties:
+ redirect: community.network.avi_cloudproperties
+ avi_cluster:
+ redirect: community.network.avi_cluster
+ avi_clusterclouddetails:
+ redirect: community.network.avi_clusterclouddetails
+ avi_controllerproperties:
+ redirect: community.network.avi_controllerproperties
+ avi_customipamdnsprofile:
+ redirect: community.network.avi_customipamdnsprofile
+ avi_dnspolicy:
+ redirect: community.network.avi_dnspolicy
+ avi_errorpagebody:
+ redirect: community.network.avi_errorpagebody
+ avi_errorpageprofile:
+ redirect: community.network.avi_errorpageprofile
+ avi_gslb:
+ redirect: community.network.avi_gslb
+ avi_gslbgeodbprofile:
+ redirect: community.network.avi_gslbgeodbprofile
+ avi_gslbservice:
+ redirect: community.network.avi_gslbservice
+ avi_gslbservice_patch_member:
+ redirect: community.network.avi_gslbservice_patch_member
+ avi_hardwaresecuritymodulegroup:
+ redirect: community.network.avi_hardwaresecuritymodulegroup
+ avi_healthmonitor:
+ redirect: community.network.avi_healthmonitor
+ avi_httppolicyset:
+ redirect: community.network.avi_httppolicyset
+ avi_ipaddrgroup:
+ redirect: community.network.avi_ipaddrgroup
+ avi_ipamdnsproviderprofile:
+ redirect: community.network.avi_ipamdnsproviderprofile
+ avi_l4policyset:
+ redirect: community.network.avi_l4policyset
+ avi_microservicegroup:
+ redirect: community.network.avi_microservicegroup
+ avi_network:
+ redirect: community.network.avi_network
+ avi_networkprofile:
+ redirect: community.network.avi_networkprofile
+ avi_networksecuritypolicy:
+ redirect: community.network.avi_networksecuritypolicy
+ avi_pkiprofile:
+ redirect: community.network.avi_pkiprofile
+ avi_pool:
+ redirect: community.network.avi_pool
+ avi_poolgroup:
+ redirect: community.network.avi_poolgroup
+ avi_poolgroupdeploymentpolicy:
+ redirect: community.network.avi_poolgroupdeploymentpolicy
+ avi_prioritylabels:
+ redirect: community.network.avi_prioritylabels
+ avi_role:
+ redirect: community.network.avi_role
+ avi_scheduler:
+ redirect: community.network.avi_scheduler
+ avi_seproperties:
+ redirect: community.network.avi_seproperties
+ avi_serverautoscalepolicy:
+ redirect: community.network.avi_serverautoscalepolicy
+ avi_serviceengine:
+ redirect: community.network.avi_serviceengine
+ avi_serviceenginegroup:
+ redirect: community.network.avi_serviceenginegroup
+ avi_snmptrapprofile:
+ redirect: community.network.avi_snmptrapprofile
+ avi_sslkeyandcertificate:
+ redirect: community.network.avi_sslkeyandcertificate
+ avi_sslprofile:
+ redirect: community.network.avi_sslprofile
+ avi_stringgroup:
+ redirect: community.network.avi_stringgroup
+ avi_systemconfiguration:
+ redirect: community.network.avi_systemconfiguration
+ avi_tenant:
+ redirect: community.network.avi_tenant
+ avi_trafficcloneprofile:
+ redirect: community.network.avi_trafficcloneprofile
+ avi_user:
+ redirect: community.network.avi_user
+ avi_useraccount:
+ redirect: community.network.avi_useraccount
+ avi_useraccountprofile:
+ redirect: community.network.avi_useraccountprofile
+ avi_virtualservice:
+ redirect: community.network.avi_virtualservice
+ avi_vrfcontext:
+ redirect: community.network.avi_vrfcontext
+ avi_vsdatascriptset:
+ redirect: community.network.avi_vsdatascriptset
+ avi_vsvip:
+ redirect: community.network.avi_vsvip
+ avi_webhook:
+ redirect: community.network.avi_webhook
+ bcf_switch:
+ redirect: community.network.bcf_switch
+ bigmon_chain:
+ redirect: community.network.bigmon_chain
+ bigmon_policy:
+ redirect: community.network.bigmon_policy
+ checkpoint_access_layer_facts:
+ redirect: check_point.mgmt.checkpoint_access_layer_facts
+ checkpoint_access_rule:
+ redirect: check_point.mgmt.checkpoint_access_rule
+ checkpoint_access_rule_facts:
+ redirect: check_point.mgmt.checkpoint_access_rule_facts
+ checkpoint_host:
+ redirect: check_point.mgmt.checkpoint_host
+ checkpoint_host_facts:
+ redirect: check_point.mgmt.checkpoint_host_facts
+ checkpoint_object_facts:
+ redirect: check_point.mgmt.checkpoint_object_facts
+ checkpoint_run_script:
+ redirect: check_point.mgmt.checkpoint_run_script
+ checkpoint_session:
+ redirect: check_point.mgmt.checkpoint_session
+ checkpoint_task_facts:
+ redirect: check_point.mgmt.checkpoint_task_facts
+ cp_publish:
+ redirect: community.network.cp_publish
+ ce_aaa_server:
+ redirect: community.network.ce_aaa_server
+ ce_aaa_server_host:
+ redirect: community.network.ce_aaa_server_host
+ ce_acl:
+ redirect: community.network.ce_acl
+ ce_acl_advance:
+ redirect: community.network.ce_acl_advance
+ ce_acl_interface:
+ redirect: community.network.ce_acl_interface
+ ce_bfd_global:
+ redirect: community.network.ce_bfd_global
+ ce_bfd_session:
+ redirect: community.network.ce_bfd_session
+ ce_bfd_view:
+ redirect: community.network.ce_bfd_view
+ ce_bgp:
+ redirect: community.network.ce_bgp
+ ce_bgp_af:
+ redirect: community.network.ce_bgp_af
+ ce_bgp_neighbor:
+ redirect: community.network.ce_bgp_neighbor
+ ce_bgp_neighbor_af:
+ redirect: community.network.ce_bgp_neighbor_af
+ ce_command:
+ redirect: community.network.ce_command
+ ce_config:
+ redirect: community.network.ce_config
+ ce_dldp:
+ redirect: community.network.ce_dldp
+ ce_dldp_interface:
+ redirect: community.network.ce_dldp_interface
+ ce_eth_trunk:
+ redirect: community.network.ce_eth_trunk
+ ce_evpn_bd_vni:
+ redirect: community.network.ce_evpn_bd_vni
+ ce_evpn_bgp:
+ redirect: community.network.ce_evpn_bgp
+ ce_evpn_bgp_rr:
+ redirect: community.network.ce_evpn_bgp_rr
+ ce_evpn_global:
+ redirect: community.network.ce_evpn_global
+ ce_facts:
+ redirect: community.network.ce_facts
+ ce_file_copy:
+ redirect: community.network.ce_file_copy
+ ce_info_center_debug:
+ redirect: community.network.ce_info_center_debug
+ ce_info_center_global:
+ redirect: community.network.ce_info_center_global
+ ce_info_center_log:
+ redirect: community.network.ce_info_center_log
+ ce_info_center_trap:
+ redirect: community.network.ce_info_center_trap
+ ce_interface:
+ redirect: community.network.ce_interface
+ ce_interface_ospf:
+ redirect: community.network.ce_interface_ospf
+ ce_ip_interface:
+ redirect: community.network.ce_ip_interface
+ ce_is_is_instance:
+ redirect: community.network.ce_is_is_instance
+ ce_is_is_interface:
+ redirect: community.network.ce_is_is_interface
+ ce_is_is_view:
+ redirect: community.network.ce_is_is_view
+ ce_lacp:
+ redirect: community.network.ce_lacp
+ ce_link_status:
+ redirect: community.network.ce_link_status
+ ce_lldp:
+ redirect: community.network.ce_lldp
+ ce_lldp_interface:
+ redirect: community.network.ce_lldp_interface
+ ce_mdn_interface:
+ redirect: community.network.ce_mdn_interface
+ ce_mlag_config:
+ redirect: community.network.ce_mlag_config
+ ce_mlag_interface:
+ redirect: community.network.ce_mlag_interface
+ ce_mtu:
+ redirect: community.network.ce_mtu
+ ce_multicast_global:
+ redirect: community.network.ce_multicast_global
+ ce_multicast_igmp_enable:
+ redirect: community.network.ce_multicast_igmp_enable
+ ce_netconf:
+ redirect: community.network.ce_netconf
+ ce_netstream_aging:
+ redirect: community.network.ce_netstream_aging
+ ce_netstream_export:
+ redirect: community.network.ce_netstream_export
+ ce_netstream_global:
+ redirect: community.network.ce_netstream_global
+ ce_netstream_template:
+ redirect: community.network.ce_netstream_template
+ ce_ntp:
+ redirect: community.network.ce_ntp
+ ce_ntp_auth:
+ redirect: community.network.ce_ntp_auth
+ ce_ospf:
+ redirect: community.network.ce_ospf
+ ce_ospf_vrf:
+ redirect: community.network.ce_ospf_vrf
+ ce_reboot:
+ redirect: community.network.ce_reboot
+ ce_rollback:
+ redirect: community.network.ce_rollback
+ ce_sflow:
+ redirect: community.network.ce_sflow
+ ce_snmp_community:
+ redirect: community.network.ce_snmp_community
+ ce_snmp_contact:
+ redirect: community.network.ce_snmp_contact
+ ce_snmp_location:
+ redirect: community.network.ce_snmp_location
+ ce_snmp_target_host:
+ redirect: community.network.ce_snmp_target_host
+ ce_snmp_traps:
+ redirect: community.network.ce_snmp_traps
+ ce_snmp_user:
+ redirect: community.network.ce_snmp_user
+ ce_startup:
+ redirect: community.network.ce_startup
+ ce_static_route:
+ redirect: community.network.ce_static_route
+ ce_static_route_bfd:
+ redirect: community.network.ce_static_route_bfd
+ ce_stp:
+ redirect: community.network.ce_stp
+ ce_switchport:
+ redirect: community.network.ce_switchport
+ ce_vlan:
+ redirect: community.network.ce_vlan
+ ce_vrf:
+ redirect: community.network.ce_vrf
+ ce_vrf_af:
+ redirect: community.network.ce_vrf_af
+ ce_vrf_interface:
+ redirect: community.network.ce_vrf_interface
+ ce_vrrp:
+ redirect: community.network.ce_vrrp
+ ce_vxlan_arp:
+ redirect: community.network.ce_vxlan_arp
+ ce_vxlan_gateway:
+ redirect: community.network.ce_vxlan_gateway
+ ce_vxlan_global:
+ redirect: community.network.ce_vxlan_global
+ ce_vxlan_tunnel:
+ redirect: community.network.ce_vxlan_tunnel
+ ce_vxlan_vap:
+ redirect: community.network.ce_vxlan_vap
+ cv_server_provision:
+ redirect: community.network.cv_server_provision
+ cnos_backup:
+ redirect: community.network.cnos_backup
+ cnos_banner:
+ redirect: community.network.cnos_banner
+ cnos_bgp:
+ redirect: community.network.cnos_bgp
+ cnos_command:
+ redirect: community.network.cnos_command
+ cnos_conditional_command:
+ redirect: community.network.cnos_conditional_command
+ cnos_conditional_template:
+ redirect: community.network.cnos_conditional_template
+ cnos_config:
+ redirect: community.network.cnos_config
+ cnos_factory:
+ redirect: community.network.cnos_factory
+ cnos_facts:
+ redirect: community.network.cnos_facts
+ cnos_image:
+ redirect: community.network.cnos_image
+ cnos_interface:
+ redirect: community.network.cnos_interface
+ cnos_l2_interface:
+ redirect: community.network.cnos_l2_interface
+ cnos_l3_interface:
+ redirect: community.network.cnos_l3_interface
+ cnos_linkagg:
+ redirect: community.network.cnos_linkagg
+ cnos_lldp:
+ redirect: community.network.cnos_lldp
+ cnos_logging:
+ redirect: community.network.cnos_logging
+ cnos_reload:
+ redirect: community.network.cnos_reload
+ cnos_rollback:
+ redirect: community.network.cnos_rollback
+ cnos_save:
+ redirect: community.network.cnos_save
+ cnos_showrun:
+ redirect: community.network.cnos_showrun
+ cnos_static_route:
+ redirect: community.network.cnos_static_route
+ cnos_system:
+ redirect: community.network.cnos_system
+ cnos_template:
+ redirect: community.network.cnos_template
+ cnos_user:
+ redirect: community.network.cnos_user
+ cnos_vlag:
+ redirect: community.network.cnos_vlag
+ cnos_vlan:
+ redirect: community.network.cnos_vlan
+ cnos_vrf:
+ redirect: community.network.cnos_vrf
+ nclu:
+ redirect: community.network.nclu
+ edgeos_command:
+ redirect: community.network.edgeos_command
+ edgeos_config:
+ redirect: community.network.edgeos_config
+ edgeos_facts:
+ redirect: community.network.edgeos_facts
+ edgeswitch_facts:
+ redirect: community.network.edgeswitch_facts
+ edgeswitch_vlan:
+ redirect: community.network.edgeswitch_vlan
+ enos_command:
+ redirect: community.network.enos_command
+ enos_config:
+ redirect: community.network.enos_config
+ enos_facts:
+ redirect: community.network.enos_facts
+ eric_eccli_command:
+ redirect: community.network.eric_eccli_command
+ exos_command:
+ redirect: community.network.exos_command
+ exos_config:
+ redirect: community.network.exos_config
+ exos_facts:
+ redirect: community.network.exos_facts
+ exos_l2_interfaces:
+ redirect: community.network.exos_l2_interfaces
+ exos_lldp_global:
+ redirect: community.network.exos_lldp_global
+ exos_lldp_interfaces:
+ redirect: community.network.exos_lldp_interfaces
+ exos_vlans:
+ redirect: community.network.exos_vlans
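+ # Editorial note (not part of the upstream file): unlike a redirect, a
+ # tombstone marks a module that has been removed outright. removal_date
+ # records when it was removed, and warning_text is surfaced as the error a
+ # playbook sees if it still references the old name.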
+ bigip_asm_policy:
+ tombstone:
+ removal_date: 2019-11-06
+ warning_text: bigip_asm_policy has been removed; please use bigip_asm_policy_manage instead.
+ bigip_device_facts:
+ redirect: f5networks.f5_modules.bigip_device_info
+ bigip_iapplx_package:
+ redirect: f5networks.f5_modules.bigip_lx_package
+ bigip_security_address_list:
+ redirect: f5networks.f5_modules.bigip_firewall_address_list
+ bigip_security_port_list:
+ redirect: f5networks.f5_modules.bigip_firewall_port_list
+ bigip_traffic_group:
+ redirect: f5networks.f5_modules.bigip_device_traffic_group
+ bigip_facts:
+ tombstone:
+ removal_date: 2019-11-06
+ warning_text: bigip_facts has been removed; please use the bigip_device_info module instead.
+ bigip_gtm_facts:
+ tombstone:
+ removal_date: 2019-11-06
+ warning_text: bigip_gtm_facts has been removed; please use the bigip_device_info module instead.
+ faz_device:
+ redirect: community.fortios.faz_device
+ fmgr_device:
+ redirect: community.fortios.fmgr_device
+ fmgr_device_config:
+ redirect: community.fortios.fmgr_device_config
+ fmgr_device_group:
+ redirect: community.fortios.fmgr_device_group
+ fmgr_device_provision_template:
+ redirect: community.fortios.fmgr_device_provision_template
+ fmgr_fwobj_address:
+ redirect: community.fortios.fmgr_fwobj_address
+ fmgr_fwobj_ippool:
+ redirect: community.fortios.fmgr_fwobj_ippool
+ fmgr_fwobj_ippool6:
+ redirect: community.fortios.fmgr_fwobj_ippool6
+ fmgr_fwobj_service:
+ redirect: community.fortios.fmgr_fwobj_service
+ fmgr_fwobj_vip:
+ redirect: community.fortios.fmgr_fwobj_vip
+ fmgr_fwpol_ipv4:
+ redirect: community.fortios.fmgr_fwpol_ipv4
+ fmgr_fwpol_package:
+ redirect: community.fortios.fmgr_fwpol_package
+ fmgr_ha:
+ redirect: community.fortios.fmgr_ha
+ fmgr_provisioning:
+ redirect: community.fortios.fmgr_provisioning
+ fmgr_query:
+ redirect: community.fortios.fmgr_query
+ fmgr_script:
+ redirect: community.fortios.fmgr_script
+ fmgr_secprof_appctrl:
+ redirect: community.fortios.fmgr_secprof_appctrl
+ fmgr_secprof_av:
+ redirect: community.fortios.fmgr_secprof_av
+ fmgr_secprof_dns:
+ redirect: community.fortios.fmgr_secprof_dns
+ fmgr_secprof_ips:
+ redirect: community.fortios.fmgr_secprof_ips
+ fmgr_secprof_profile_group:
+ redirect: community.fortios.fmgr_secprof_profile_group
+ fmgr_secprof_proxy:
+ redirect: community.fortios.fmgr_secprof_proxy
+ fmgr_secprof_spam:
+ redirect: community.fortios.fmgr_secprof_spam
+ fmgr_secprof_ssl_ssh:
+ redirect: community.fortios.fmgr_secprof_ssl_ssh
+ fmgr_secprof_voip:
+ redirect: community.fortios.fmgr_secprof_voip
+ fmgr_secprof_waf:
+ redirect: community.fortios.fmgr_secprof_waf
+ fmgr_secprof_wanopt:
+ redirect: community.fortios.fmgr_secprof_wanopt
+ fmgr_secprof_web:
+ redirect: community.fortios.fmgr_secprof_web
+ ftd_configuration:
+ redirect: community.network.ftd_configuration
+ ftd_file_download:
+ redirect: community.network.ftd_file_download
+ ftd_file_upload:
+ redirect: community.network.ftd_file_upload
+ ftd_install:
+ redirect: community.network.ftd_install
+ icx_banner:
+ redirect: community.network.icx_banner
+ icx_command:
+ redirect: community.network.icx_command
+ icx_config:
+ redirect: community.network.icx_config
+ icx_copy:
+ redirect: community.network.icx_copy
+ icx_facts:
+ redirect: community.network.icx_facts
+ icx_interface:
+ redirect: community.network.icx_interface
+ icx_l3_interface:
+ redirect: community.network.icx_l3_interface
+ icx_linkagg:
+ redirect: community.network.icx_linkagg
+ icx_lldp:
+ redirect: community.network.icx_lldp
+ icx_logging:
+ redirect: community.network.icx_logging
+ icx_ping:
+ redirect: community.network.icx_ping
+ icx_static_route:
+ redirect: community.network.icx_static_route
+ icx_system:
+ redirect: community.network.icx_system
+ icx_user:
+ redirect: community.network.icx_user
+ icx_vlan:
+ redirect: community.network.icx_vlan
+ dladm_etherstub:
+ redirect: community.network.dladm_etherstub
+ dladm_iptun:
+ redirect: community.network.dladm_iptun
+ dladm_linkprop:
+ redirect: community.network.dladm_linkprop
+ dladm_vlan:
+ redirect: community.network.dladm_vlan
+ dladm_vnic:
+ redirect: community.network.dladm_vnic
+ flowadm:
+ redirect: community.network.flowadm
+ ipadm_addr:
+ redirect: community.network.ipadm_addr
+ ipadm_addrprop:
+ redirect: community.network.ipadm_addrprop
+ ipadm_if:
+ redirect: community.network.ipadm_if
+ ipadm_ifprop:
+ redirect: community.network.ipadm_ifprop
+ ipadm_prop:
+ redirect: community.network.ipadm_prop
+ ig_config:
+ redirect: community.network.ig_config
+ ig_unit_information:
+ redirect: community.network.ig_unit_information
+ ironware_command:
+ redirect: community.network.ironware_command
+ ironware_config:
+ redirect: community.network.ironware_config
+ ironware_facts:
+ redirect: community.network.ironware_facts
+ iap_start_workflow:
+ redirect: community.network.iap_start_workflow
+ iap_token:
+ redirect: community.network.iap_token
+ netact_cm_command:
+ redirect: community.network.netact_cm_command
+ netscaler_cs_action:
+ redirect: community.network.netscaler_cs_action
+ netscaler_cs_policy:
+ redirect: community.network.netscaler_cs_policy
+ netscaler_cs_vserver:
+ redirect: community.network.netscaler_cs_vserver
+ netscaler_gslb_service:
+ redirect: community.network.netscaler_gslb_service
+ netscaler_gslb_site:
+ redirect: community.network.netscaler_gslb_site
+ netscaler_gslb_vserver:
+ redirect: community.network.netscaler_gslb_vserver
+ netscaler_lb_monitor:
+ redirect: community.network.netscaler_lb_monitor
+ netscaler_lb_vserver:
+ redirect: community.network.netscaler_lb_vserver
+ netscaler_nitro_request:
+ redirect: community.network.netscaler_nitro_request
+ netscaler_save_config:
+ redirect: community.network.netscaler_save_config
+ netscaler_server:
+ redirect: community.network.netscaler_server
+ netscaler_service:
+ redirect: community.network.netscaler_service
+ netscaler_servicegroup:
+ redirect: community.network.netscaler_servicegroup
+ netscaler_ssl_certkey:
+ redirect: community.network.netscaler_ssl_certkey
+ pn_cluster:
+ redirect: community.network.pn_cluster
+ pn_ospf:
+ redirect: community.network.pn_ospf
+ pn_ospfarea:
+ redirect: community.network.pn_ospfarea
+ pn_show:
+ redirect: community.network.pn_show
+ pn_trunk:
+ redirect: community.network.pn_trunk
+ pn_vlag:
+ redirect: community.network.pn_vlag
+ pn_vlan:
+ redirect: community.network.pn_vlan
+ pn_vrouter:
+ redirect: community.network.pn_vrouter
+ pn_vrouterbgp:
+ redirect: community.network.pn_vrouterbgp
+ pn_vrouterif:
+ redirect: community.network.pn_vrouterif
+ pn_vrouterlbif:
+ redirect: community.network.pn_vrouterlbif
+ pn_access_list:
+ redirect: community.network.pn_access_list
+ pn_access_list_ip:
+ redirect: community.network.pn_access_list_ip
+ pn_admin_service:
+ redirect: community.network.pn_admin_service
+ pn_admin_session_timeout:
+ redirect: community.network.pn_admin_session_timeout
+ pn_admin_syslog:
+ redirect: community.network.pn_admin_syslog
+ pn_connection_stats_settings:
+ redirect: community.network.pn_connection_stats_settings
+ pn_cpu_class:
+ redirect: community.network.pn_cpu_class
+ pn_cpu_mgmt_class:
+ redirect: community.network.pn_cpu_mgmt_class
+ pn_dhcp_filter:
+ redirect: community.network.pn_dhcp_filter
+ pn_dscp_map:
+ redirect: community.network.pn_dscp_map
+ pn_dscp_map_pri_map:
+ redirect: community.network.pn_dscp_map_pri_map
+ pn_fabric_local:
+ redirect: community.network.pn_fabric_local
+ pn_igmp_snooping:
+ redirect: community.network.pn_igmp_snooping
+ pn_ipv6security_raguard:
+ redirect: community.network.pn_ipv6security_raguard
+ pn_ipv6security_raguard_port:
+ redirect: community.network.pn_ipv6security_raguard_port
+ pn_ipv6security_raguard_vlan:
+ redirect: community.network.pn_ipv6security_raguard_vlan
+ pn_log_audit_exception:
+ redirect: community.network.pn_log_audit_exception
+ pn_port_config:
+ redirect: community.network.pn_port_config
+ pn_port_cos_bw:
+ redirect: community.network.pn_port_cos_bw
+ pn_port_cos_rate_setting:
+ redirect: community.network.pn_port_cos_rate_setting
+ pn_prefix_list:
+ redirect: community.network.pn_prefix_list
+ pn_prefix_list_network:
+ redirect: community.network.pn_prefix_list_network
+ pn_role:
+ redirect: community.network.pn_role
+ pn_snmp_community:
+ redirect: community.network.pn_snmp_community
+ pn_snmp_trap_sink:
+ redirect: community.network.pn_snmp_trap_sink
+ pn_snmp_vacm:
+ redirect: community.network.pn_snmp_vacm
+ pn_stp:
+ redirect: community.network.pn_stp
+ pn_stp_port:
+ redirect: community.network.pn_stp_port
+ pn_switch_setup:
+ redirect: community.network.pn_switch_setup
+ pn_user:
+ redirect: community.network.pn_user
+ pn_vflow_table_profile:
+ redirect: community.network.pn_vflow_table_profile
+ pn_vrouter_bgp:
+ redirect: community.network.pn_vrouter_bgp
+ pn_vrouter_bgp_network:
+ redirect: community.network.pn_vrouter_bgp_network
+ pn_vrouter_interface_ip:
+ redirect: community.network.pn_vrouter_interface_ip
+ pn_vrouter_loopback_interface:
+ redirect: community.network.pn_vrouter_loopback_interface
+ pn_vrouter_ospf:
+ redirect: community.network.pn_vrouter_ospf
+ pn_vrouter_ospf6:
+ redirect: community.network.pn_vrouter_ospf6
+ pn_vrouter_packet_relay:
+ redirect: community.network.pn_vrouter_packet_relay
+ pn_vrouter_pim_config:
+ redirect: community.network.pn_vrouter_pim_config
+ pn_vtep:
+ redirect: community.network.pn_vtep
+ nos_command:
+ redirect: community.network.nos_command
+ nos_config:
+ redirect: community.network.nos_config
+ nos_facts:
+ redirect: community.network.nos_facts
+ nso_action:
+ redirect: cisco.nso.nso_action
+ nso_config:
+ redirect: cisco.nso.nso_config
+ nso_query:
+ redirect: cisco.nso.nso_query
+ nso_show:
+ redirect: cisco.nso.nso_show
+ nso_verify:
+ redirect: cisco.nso.nso_verify
+ nuage_vspk:
+ redirect: community.network.nuage_vspk
+ onyx_aaa:
+ redirect: mellanox.onyx.onyx_aaa
+ onyx_bfd:
+ redirect: mellanox.onyx.onyx_bfd
+ onyx_bgp:
+ redirect: mellanox.onyx.onyx_bgp
+ onyx_buffer_pool:
+ redirect: mellanox.onyx.onyx_buffer_pool
+ onyx_command:
+ redirect: mellanox.onyx.onyx_command
+ onyx_config:
+ redirect: mellanox.onyx.onyx_config
+ onyx_facts:
+ redirect: mellanox.onyx.onyx_facts
+ onyx_igmp:
+ redirect: mellanox.onyx.onyx_igmp
+ onyx_igmp_interface:
+ redirect: mellanox.onyx.onyx_igmp_interface
+ onyx_igmp_vlan:
+ redirect: mellanox.onyx.onyx_igmp_vlan
+ onyx_interface:
+ redirect: mellanox.onyx.onyx_interface
+ onyx_l2_interface:
+ redirect: mellanox.onyx.onyx_l2_interface
+ onyx_l3_interface:
+ redirect: mellanox.onyx.onyx_l3_interface
+ onyx_linkagg:
+ redirect: mellanox.onyx.onyx_linkagg
+ onyx_lldp:
+ redirect: mellanox.onyx.onyx_lldp
+ onyx_lldp_interface:
+ redirect: mellanox.onyx.onyx_lldp_interface
+ onyx_magp:
+ redirect: mellanox.onyx.onyx_magp
+ onyx_mlag_ipl:
+ redirect: mellanox.onyx.onyx_mlag_ipl
+ onyx_mlag_vip:
+ redirect: mellanox.onyx.onyx_mlag_vip
+ onyx_ntp:
+ redirect: mellanox.onyx.onyx_ntp
+ onyx_ntp_servers_peers:
+ redirect: mellanox.onyx.onyx_ntp_servers_peers
+ onyx_ospf:
+ redirect: mellanox.onyx.onyx_ospf
+ onyx_pfc_interface:
+ redirect: mellanox.onyx.onyx_pfc_interface
+ onyx_protocol:
+ redirect: mellanox.onyx.onyx_protocol
+ onyx_ptp_global:
+ redirect: mellanox.onyx.onyx_ptp_global
+ onyx_ptp_interface:
+ redirect: mellanox.onyx.onyx_ptp_interface
+ onyx_qos:
+ redirect: mellanox.onyx.onyx_qos
+ onyx_snmp:
+ redirect: mellanox.onyx.onyx_snmp
+ onyx_snmp_hosts:
+ redirect: mellanox.onyx.onyx_snmp_hosts
+ onyx_snmp_users:
+ redirect: mellanox.onyx.onyx_snmp_users
+ onyx_syslog_files:
+ redirect: mellanox.onyx.onyx_syslog_files
+ onyx_syslog_remote:
+ redirect: mellanox.onyx.onyx_syslog_remote
+ onyx_traffic_class:
+ redirect: mellanox.onyx.onyx_traffic_class
+ onyx_username:
+ redirect: mellanox.onyx.onyx_username
+ onyx_vlan:
+ redirect: mellanox.onyx.onyx_vlan
+ onyx_vxlan:
+ redirect: mellanox.onyx.onyx_vxlan
+ onyx_wjh:
+ redirect: mellanox.onyx.onyx_wjh
+ opx_cps:
+ redirect: community.network.opx_cps
+ ordnance_config:
+ redirect: community.network.ordnance_config
+ ordnance_facts:
+ redirect: community.network.ordnance_facts
+ panos_admin:
+ redirect: community.network.panos_admin
+ panos_admpwd:
+ redirect: community.network.panos_admpwd
+ panos_cert_gen_ssh:
+ redirect: community.network.panos_cert_gen_ssh
+ panos_check:
+ redirect: community.network.panos_check
+ panos_commit:
+ redirect: community.network.panos_commit
+ panos_dag:
+ redirect: community.network.panos_dag
+ panos_dag_tags:
+ redirect: community.network.panos_dag_tags
+ panos_import:
+ redirect: community.network.panos_import
+ panos_interface:
+ redirect: community.network.panos_interface
+ panos_lic:
+ redirect: community.network.panos_lic
+ panos_loadcfg:
+ redirect: community.network.panos_loadcfg
+ panos_match_rule:
+ redirect: community.network.panos_match_rule
+ panos_mgtconfig:
+ redirect: community.network.panos_mgtconfig
+ panos_nat_rule:
+ redirect: community.network.panos_nat_rule
+ panos_object:
+ redirect: community.network.panos_object
+ panos_op:
+ redirect: community.network.panos_op
+ panos_pg:
+ redirect: community.network.panos_pg
+ panos_query_rules:
+ redirect: community.network.panos_query_rules
+ panos_restart:
+ redirect: community.network.panos_restart
+ panos_sag:
+ redirect: community.network.panos_sag
+ panos_security_rule:
+ redirect: community.network.panos_security_rule
+ panos_set:
+ redirect: community.network.panos_set
+ vdirect_commit:
+ redirect: community.network.vdirect_commit
+ vdirect_file:
+ redirect: community.network.vdirect_file
+ vdirect_runnable:
+ redirect: community.network.vdirect_runnable
+ routeros_command:
+ redirect: community.routeros.command
+ routeros_facts:
+ redirect: community.routeros.facts
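+ # Note: unlike most entries here, the two routeros redirects above also
+ # rename the modules; community.routeros dropped the routeros_ prefix,
+ # so the legacy routeros_command resolves to community.routeros.command.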
+ slxos_command:
+ redirect: community.network.slxos_command
+ slxos_config:
+ redirect: community.network.slxos_config
+ slxos_facts:
+ redirect: community.network.slxos_facts
+ slxos_interface:
+ redirect: community.network.slxos_interface
+ slxos_l2_interface:
+ redirect: community.network.slxos_l2_interface
+ slxos_l3_interface:
+ redirect: community.network.slxos_l3_interface
+ slxos_linkagg:
+ redirect: community.network.slxos_linkagg
+ slxos_lldp:
+ redirect: community.network.slxos_lldp
+ slxos_vlan:
+ redirect: community.network.slxos_vlan
+ sros_command:
+ redirect: community.network.sros_command
+ sros_config:
+ redirect: community.network.sros_config
+ sros_rollback:
+ redirect: community.network.sros_rollback
+ voss_command:
+ redirect: community.network.voss_command
+ voss_config:
+ redirect: community.network.voss_config
+ voss_facts:
+ redirect: community.network.voss_facts
+ osx_say:
+ redirect: community.general.say
+ bearychat:
+ redirect: community.general.bearychat
+ campfire:
+ redirect: community.general.campfire
+ catapult:
+ redirect: community.general.catapult
+ cisco_spark:
+ redirect: community.general.cisco_spark
+ flowdock:
+ redirect: community.general.flowdock
+ grove:
+ redirect: community.general.grove
+ hipchat:
+ redirect: community.general.hipchat
+ irc:
+ redirect: community.general.irc
+ jabber:
+ redirect: community.general.jabber
+ logentries_msg:
+ redirect: community.general.logentries_msg
+ mail:
+ redirect: community.general.mail
+ matrix:
+ redirect: community.general.matrix
+ mattermost:
+ redirect: community.general.mattermost
+ mqtt:
+ redirect: community.general.mqtt
+ nexmo:
+ redirect: community.general.nexmo
+ office_365_connector_card:
+ redirect: community.general.office_365_connector_card
+ pushbullet:
+ redirect: community.general.pushbullet
+ pushover:
+ redirect: community.general.pushover
+ rabbitmq_publish:
+ redirect: community.rabbitmq.rabbitmq_publish
+ rocketchat:
+ redirect: community.general.rocketchat
+ say:
+ redirect: community.general.say
+ sendgrid:
+ redirect: community.general.sendgrid
+ slack:
+ redirect: community.general.slack
+ syslogger:
+ redirect: community.general.syslogger
+ telegram:
+ redirect: community.general.telegram
+ twilio:
+ redirect: community.general.twilio
+ typetalk:
+ redirect: community.general.typetalk
+ bower:
+ redirect: community.general.bower
+ bundler:
+ redirect: community.general.bundler
+ composer:
+ redirect: community.general.composer
+ cpanm:
+ redirect: community.general.cpanm
+ easy_install:
+ redirect: community.general.easy_install
+ gem:
+ redirect: community.general.gem
+ maven_artifact:
+ redirect: community.general.maven_artifact
+ npm:
+ redirect: community.general.npm
+ pear:
+ redirect: community.general.pear
+ pip_package_info:
+ redirect: community.general.pip_package_info
+ yarn:
+ redirect: community.general.yarn
+ apk:
+ redirect: community.general.apk
+ apt_rpm:
+ redirect: community.general.apt_rpm
+ flatpak:
+ redirect: community.general.flatpak
+ flatpak_remote:
+ redirect: community.general.flatpak_remote
+ homebrew:
+ redirect: community.general.homebrew
+ homebrew_cask:
+ redirect: community.general.homebrew_cask
+ homebrew_tap:
+ redirect: community.general.homebrew_tap
+ installp:
+ redirect: community.general.installp
+ layman:
+ redirect: community.general.layman
+ macports:
+ redirect: community.general.macports
+ mas:
+ redirect: community.general.mas
+ openbsd_pkg:
+ redirect: community.general.openbsd_pkg
+ opkg:
+ redirect: community.general.opkg
+ pacman:
+ redirect: community.general.pacman
+ pkg5:
+ redirect: community.general.pkg5
+ pkg5_publisher:
+ redirect: community.general.pkg5_publisher
+ pkgin:
+ redirect: community.general.pkgin
+ pkgng:
+ redirect: community.general.pkgng
+ pkgutil:
+ redirect: community.general.pkgutil
+ portage:
+ redirect: community.general.portage
+ portinstall:
+ redirect: community.general.portinstall
+ pulp_repo:
+ redirect: community.general.pulp_repo
+ redhat_subscription:
+ redirect: community.general.redhat_subscription
+ rhn_channel:
+ redirect: community.general.rhn_channel
+ rhn_register:
+ redirect: community.general.rhn_register
+ rhsm_release:
+ redirect: community.general.rhsm_release
+ rhsm_repository:
+ redirect: community.general.rhsm_repository
+ slackpkg:
+ redirect: community.general.slackpkg
+ snap:
+ redirect: community.general.snap
+ sorcery:
+ redirect: community.general.sorcery
+ svr4pkg:
+ redirect: community.general.svr4pkg
+ swdepot:
+ redirect: community.general.swdepot
+ swupd:
+ redirect: community.general.swupd
+ urpmi:
+ redirect: community.general.urpmi
+ xbps:
+ redirect: community.general.xbps
+ zypper:
+ redirect: community.general.zypper
+ zypper_repository:
+ redirect: community.general.zypper_repository
+ cobbler_sync:
+ redirect: community.general.cobbler_sync
+ cobbler_system:
+ redirect: community.general.cobbler_system
+ idrac_firmware:
+ redirect: dellemc.openmanage.idrac_firmware
+ idrac_server_config_profile:
+ redirect: dellemc.openmanage.idrac_server_config_profile
+ ome_device_info:
+ redirect: dellemc.openmanage.ome_device_info
+ foreman:
+ redirect: community.general.foreman
+ katello:
+ redirect: community.general.katello
+ hpilo_facts:
+ redirect: community.general.hpilo_facts
+ hpilo_boot:
+ redirect: community.general.hpilo_boot
+ hpilo_info:
+ redirect: community.general.hpilo_info
+ hponcfg:
+ redirect: community.general.hponcfg
+ imc_rest:
+ redirect: community.general.imc_rest
+ ipmi_boot:
+ redirect: community.general.ipmi_boot
+ ipmi_power:
+ redirect: community.general.ipmi_power
+ lxca_cmms:
+ redirect: community.general.lxca_cmms
+ lxca_nodes:
+ redirect: community.general.lxca_nodes
+ manageiq_alert_profiles:
+ redirect: community.general.manageiq_alert_profiles
+ manageiq_alerts:
+ redirect: community.general.manageiq_alerts
+ manageiq_group:
+ redirect: community.general.manageiq_group
+ manageiq_policies:
+ redirect: community.general.manageiq_policies
+ manageiq_provider:
+ redirect: community.general.manageiq_provider
+ manageiq_tags:
+ redirect: community.general.manageiq_tags
+ manageiq_tenant:
+ redirect: community.general.manageiq_tenant
+ manageiq_user:
+ redirect: community.general.manageiq_user
+ oneview_datacenter_facts:
+ redirect: community.general.oneview_datacenter_facts
+ oneview_enclosure_facts:
+ redirect: community.general.oneview_enclosure_facts
+ oneview_ethernet_network_facts:
+ redirect: community.general.oneview_ethernet_network_facts
+ oneview_fc_network_facts:
+ redirect: community.general.oneview_fc_network_facts
+ oneview_fcoe_network_facts:
+ redirect: community.general.oneview_fcoe_network_facts
+ oneview_logical_interconnect_group_facts:
+ redirect: community.general.oneview_logical_interconnect_group_facts
+ oneview_network_set_facts:
+ redirect: community.general.oneview_network_set_facts
+ oneview_san_manager_facts:
+ redirect: community.general.oneview_san_manager_facts
+ oneview_datacenter_info:
+ redirect: community.general.oneview_datacenter_info
+ oneview_enclosure_info:
+ redirect: community.general.oneview_enclosure_info
+ oneview_ethernet_network:
+ redirect: community.general.oneview_ethernet_network
+ oneview_ethernet_network_info:
+ redirect: community.general.oneview_ethernet_network_info
+ oneview_fc_network:
+ redirect: community.general.oneview_fc_network
+ oneview_fc_network_info:
+ redirect: community.general.oneview_fc_network_info
+ oneview_fcoe_network:
+ redirect: community.general.oneview_fcoe_network
+ oneview_fcoe_network_info:
+ redirect: community.general.oneview_fcoe_network_info
+ oneview_logical_interconnect_group:
+ redirect: community.general.oneview_logical_interconnect_group
+ oneview_logical_interconnect_group_info:
+ redirect: community.general.oneview_logical_interconnect_group_info
+ oneview_network_set:
+ redirect: community.general.oneview_network_set
+ oneview_network_set_info:
+ redirect: community.general.oneview_network_set_info
+ oneview_san_manager:
+ redirect: community.general.oneview_san_manager
+ oneview_san_manager_info:
+ redirect: community.general.oneview_san_manager_info
+ idrac_redfish_facts:
+ redirect: community.general.idrac_redfish_facts
+ redfish_facts:
+ redirect: community.general.redfish_facts
+ idrac_redfish_command:
+ redirect: community.general.idrac_redfish_command
+ idrac_redfish_config:
+ redirect: community.general.idrac_redfish_config
+ idrac_redfish_info:
+ redirect: community.general.idrac_redfish_info
+ redfish_command:
+ redirect: community.general.redfish_command
+ redfish_config:
+ redirect: community.general.redfish_config
+ redfish_info:
+ redirect: community.general.redfish_info
+ stacki_host:
+ redirect: community.general.stacki_host
+ wakeonlan:
+ redirect: community.general.wakeonlan
+ bitbucket_access_key:
+ redirect: community.general.bitbucket_access_key
+ bitbucket_pipeline_key_pair:
+ redirect: community.general.bitbucket_pipeline_key_pair
+ bitbucket_pipeline_known_host:
+ redirect: community.general.bitbucket_pipeline_known_host
+ bitbucket_pipeline_variable:
+ redirect: community.general.bitbucket_pipeline_variable
+ bzr:
+ redirect: community.general.bzr
+ git_config:
+ redirect: community.general.git_config
+ github_hooks:
+ redirect: community.general.github_hooks
+ github_webhook_facts:
+ redirect: community.general.github_webhook_info
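+ # Note: a few redirects fold the facts -> info rename into the routing
+ # itself, as with github_webhook_facts -> github_webhook_info above (and
+ # gitlab_hooks -> gitlab_hook, gluster_heal_facts -> gluster_heal_info
+ # further down), rather than pointing at a same-named deprecated alias.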
+ github_deploy_key:
+ redirect: community.general.github_deploy_key
+ github_issue:
+ redirect: community.general.github_issue
+ github_key:
+ redirect: community.general.github_key
+ github_release:
+ redirect: community.general.github_release
+ github_webhook:
+ redirect: community.general.github_webhook
+ github_webhook_info:
+ redirect: community.general.github_webhook_info
+ gitlab_hooks:
+ redirect: community.general.gitlab_hook
+ gitlab_deploy_key:
+ redirect: community.general.gitlab_deploy_key
+ gitlab_group:
+ redirect: community.general.gitlab_group
+ gitlab_hook:
+ redirect: community.general.gitlab_hook
+ gitlab_project:
+ redirect: community.general.gitlab_project
+ gitlab_project_variable:
+ redirect: community.general.gitlab_project_variable
+ gitlab_runner:
+ redirect: community.general.gitlab_runner
+ gitlab_user:
+ redirect: community.general.gitlab_user
+ hg:
+ redirect: community.general.hg
+ emc_vnx_sg_member:
+ redirect: community.general.emc_vnx_sg_member
+ gluster_heal_facts:
+ redirect: gluster.gluster.gluster_heal_info
+ gluster_heal_info:
+ redirect: gluster.gluster.gluster_heal_info
+ gluster_peer:
+ redirect: gluster.gluster.gluster_peer
+ gluster_volume:
+ redirect: gluster.gluster.gluster_volume
+ ss_3par_cpg:
+ redirect: community.general.ss_3par_cpg
+ ibm_sa_domain:
+ redirect: community.general.ibm_sa_domain
+ ibm_sa_host:
+ redirect: community.general.ibm_sa_host
+ ibm_sa_host_ports:
+ redirect: community.general.ibm_sa_host_ports
+ ibm_sa_pool:
+ redirect: community.general.ibm_sa_pool
+ ibm_sa_vol:
+ redirect: community.general.ibm_sa_vol
+ ibm_sa_vol_map:
+ redirect: community.general.ibm_sa_vol_map
+ infini_export:
+ redirect: infinidat.infinibox.infini_export
+ infini_export_client:
+ redirect: infinidat.infinibox.infini_export_client
+ infini_fs:
+ redirect: infinidat.infinibox.infini_fs
+ infini_host:
+ redirect: infinidat.infinibox.infini_host
+ infini_pool:
+ redirect: infinidat.infinibox.infini_pool
+ infini_vol:
+ redirect: infinidat.infinibox.infini_vol
+ na_cdot_aggregate:
+ redirect: community.general.na_cdot_aggregate
+ na_cdot_license:
+ redirect: community.general.na_cdot_license
+ na_cdot_lun:
+ redirect: community.general.na_cdot_lun
+ na_cdot_qtree:
+ redirect: community.general.na_cdot_qtree
+ na_cdot_svm:
+ redirect: community.general.na_cdot_svm
+ na_cdot_user:
+ redirect: community.general.na_cdot_user
+ na_cdot_user_role:
+ redirect: community.general.na_cdot_user_role
+ na_cdot_volume:
+ redirect: community.general.na_cdot_volume
+ na_ontap_gather_facts:
+ redirect: community.general.na_ontap_gather_facts
+ sf_account_manager:
+ redirect: community.general.sf_account_manager
+ sf_check_connections:
+ redirect: community.general.sf_check_connections
+ sf_snapshot_schedule_manager:
+ redirect: community.general.sf_snapshot_schedule_manager
+ sf_volume_access_group_manager:
+ redirect: community.general.sf_volume_access_group_manager
+ sf_volume_manager:
+ redirect: community.general.sf_volume_manager
+ netapp_e_alerts:
+ redirect: netapp_eseries.santricity.netapp_e_alerts
+ netapp_e_amg:
+ redirect: netapp_eseries.santricity.netapp_e_amg
+ netapp_e_amg_role:
+ redirect: netapp_eseries.santricity.netapp_e_amg_role
+ netapp_e_amg_sync:
+ redirect: netapp_eseries.santricity.netapp_e_amg_sync
+ netapp_e_asup:
+ redirect: netapp_eseries.santricity.netapp_e_asup
+ netapp_e_auditlog:
+ redirect: netapp_eseries.santricity.netapp_e_auditlog
+ netapp_e_auth:
+ redirect: netapp_eseries.santricity.netapp_e_auth
+ netapp_e_drive_firmware:
+ redirect: netapp_eseries.santricity.netapp_e_drive_firmware
+ netapp_e_facts:
+ redirect: netapp_eseries.santricity.netapp_e_facts
+ netapp_e_firmware:
+ redirect: netapp_eseries.santricity.netapp_e_firmware
+ netapp_e_flashcache:
+ redirect: netapp_eseries.santricity.netapp_e_flashcache
+ netapp_e_global:
+ redirect: netapp_eseries.santricity.netapp_e_global
+ netapp_e_host:
+ redirect: netapp_eseries.santricity.netapp_e_host
+ netapp_e_hostgroup:
+ redirect: netapp_eseries.santricity.netapp_e_hostgroup
+ netapp_e_iscsi_interface:
+ redirect: netapp_eseries.santricity.netapp_e_iscsi_interface
+ netapp_e_iscsi_target:
+ redirect: netapp_eseries.santricity.netapp_e_iscsi_target
+ netapp_e_ldap:
+ redirect: netapp_eseries.santricity.netapp_e_ldap
+ netapp_e_lun_mapping:
+ redirect: netapp_eseries.santricity.netapp_e_lun_mapping
+ netapp_e_mgmt_interface:
+ redirect: netapp_eseries.santricity.netapp_e_mgmt_interface
+ netapp_e_snapshot_group:
+ redirect: netapp_eseries.santricity.netapp_e_snapshot_group
+ netapp_e_snapshot_images:
+ redirect: netapp_eseries.santricity.netapp_e_snapshot_images
+ netapp_e_snapshot_volume:
+ redirect: netapp_eseries.santricity.netapp_e_snapshot_volume
+ netapp_e_storage_system:
+ redirect: netapp_eseries.santricity.netapp_e_storage_system
+ netapp_e_storagepool:
+ redirect: netapp_eseries.santricity.netapp_e_storagepool
+ netapp_e_syslog:
+ redirect: netapp_eseries.santricity.netapp_e_syslog
+ netapp_e_volume:
+ redirect: netapp_eseries.santricity.netapp_e_volume
+ netapp_e_volume_copy:
+ redirect: netapp_eseries.santricity.netapp_e_volume_copy
+ purefa_facts:
+ redirect: community.general.purefa_facts
+ purefb_facts:
+ redirect: community.general.purefb_facts
+ vexata_eg:
+ redirect: community.general.vexata_eg
+ vexata_volume:
+ redirect: community.general.vexata_volume
+ zfs:
+ redirect: community.general.zfs
+ zfs_delegate_admin:
+ redirect: community.general.zfs_delegate_admin
+ zfs_facts:
+ redirect: community.general.zfs_facts
+ zpool_facts:
+ redirect: community.general.zpool_facts
+ python_requirements_facts:
+ redirect: community.general.python_requirements_facts
+ aix_devices:
+ redirect: community.general.aix_devices
+ aix_filesystem:
+ redirect: community.general.aix_filesystem
+ aix_inittab:
+ redirect: community.general.aix_inittab
+ aix_lvg:
+ redirect: community.general.aix_lvg
+ aix_lvol:
+ redirect: community.general.aix_lvol
+ alternatives:
+ redirect: community.general.alternatives
+ awall:
+ redirect: community.general.awall
+ beadm:
+ redirect: community.general.beadm
+ capabilities:
+ redirect: community.general.capabilities
+ cronvar:
+ redirect: community.general.cronvar
+ crypttab:
+ redirect: community.general.crypttab
+ dconf:
+ redirect: community.general.dconf
+ facter:
+ redirect: community.general.facter
+ filesystem:
+ redirect: community.general.filesystem
+ firewalld:
+ redirect: ansible.posix.firewalld
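+ # Note: firewalld is the one module in this run of system modules that
+ # moved to ansible.posix rather than community.general.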
+ gconftool2:
+ redirect: community.general.gconftool2
+ interfaces_file:
+ redirect: community.general.interfaces_file
+ java_cert:
+ redirect: community.general.java_cert
+ java_keystore:
+ redirect: community.general.java_keystore
+ kernel_blacklist:
+ redirect: community.general.kernel_blacklist
+ lbu:
+ redirect: community.general.lbu
+ listen_ports_facts:
+ redirect: community.general.listen_ports_facts
+ locale_gen:
+ redirect: community.general.locale_gen
+ lvg:
+ redirect: community.general.lvg
+ lvol:
+ redirect: community.general.lvol
+ make:
+ redirect: community.general.make
+ mksysb:
+ redirect: community.general.mksysb
+ modprobe:
+ redirect: community.general.modprobe
+ nosh:
+ redirect: community.general.nosh
+ ohai:
+ redirect: community.general.ohai
+ open_iscsi:
+ redirect: community.general.open_iscsi
+ openwrt_init:
+ redirect: community.general.openwrt_init
+ osx_defaults:
+ redirect: community.general.osx_defaults
+ pam_limits:
+ redirect: community.general.pam_limits
+ pamd:
+ redirect: community.general.pamd
+ parted:
+ redirect: community.general.parted
+ pids:
+ redirect: community.general.pids
+ puppet:
+ redirect: community.general.puppet
+ python_requirements_info:
+ redirect: community.general.python_requirements_info
+ runit:
+ redirect: community.general.runit
+ sefcontext:
+ redirect: community.general.sefcontext
+ selinux_permissive:
+ redirect: community.general.selinux_permissive
+ selogin:
+ redirect: community.general.selogin
+ seport:
+ redirect: community.general.seport
+ solaris_zone:
+ redirect: community.general.solaris_zone
+ svc:
+ redirect: community.general.svc
+ syspatch:
+ redirect: community.general.syspatch
+ timezone:
+ redirect: community.general.timezone
+ ufw:
+ redirect: community.general.ufw
+ vdo:
+ redirect: community.general.vdo
+ xfconf:
+ redirect: community.general.xfconf
+ xfs_quota:
+ redirect: community.general.xfs_quota
+ jenkins_job_facts:
+ redirect: community.general.jenkins_job_facts
+ nginx_status_facts:
+ redirect: community.general.nginx_status_facts
+ apache2_mod_proxy:
+ redirect: community.general.apache2_mod_proxy
+ apache2_module:
+ redirect: community.general.apache2_module
+ deploy_helper:
+ redirect: community.general.deploy_helper
+ django_manage:
+ redirect: community.general.django_manage
+ ejabberd_user:
+ redirect: community.general.ejabberd_user
+ gunicorn:
+ redirect: community.general.gunicorn
+ htpasswd:
+ redirect: community.general.htpasswd
+ jboss:
+ redirect: community.general.jboss
+ jenkins_job:
+ redirect: community.general.jenkins_job
+ jenkins_job_info:
+ redirect: community.general.jenkins_job_info
+ jenkins_plugin:
+ redirect: community.general.jenkins_plugin
+ jenkins_script:
+ redirect: community.general.jenkins_script
+ jira:
+ redirect: community.general.jira
+ nginx_status_info:
+ redirect: community.general.nginx_status_info
+ rundeck_acl_policy:
+ redirect: community.general.rundeck_acl_policy
+ rundeck_project:
+ redirect: community.general.rundeck_project
+ utm_aaa_group:
+ redirect: community.general.utm_aaa_group
+ utm_aaa_group_info:
+ redirect: community.general.utm_aaa_group_info
+ utm_ca_host_key_cert:
+ redirect: community.general.utm_ca_host_key_cert
+ utm_ca_host_key_cert_info:
+ redirect: community.general.utm_ca_host_key_cert_info
+ utm_dns_host:
+ redirect: community.general.utm_dns_host
+ utm_network_interface_address:
+ redirect: community.general.utm_network_interface_address
+ utm_network_interface_address_info:
+ redirect: community.general.utm_network_interface_address_info
+ utm_proxy_auth_profile:
+ redirect: community.general.utm_proxy_auth_profile
+ utm_proxy_exception:
+ redirect: community.general.utm_proxy_exception
+ utm_proxy_frontend:
+ redirect: community.general.utm_proxy_frontend
+ utm_proxy_frontend_info:
+ redirect: community.general.utm_proxy_frontend_info
+ utm_proxy_location:
+ redirect: community.general.utm_proxy_location
+ utm_proxy_location_info:
+ redirect: community.general.utm_proxy_location_info
+ supervisorctl:
+ redirect: community.general.supervisorctl
+ taiga_issue:
+ redirect: community.general.taiga_issue
+ grafana_dashboard:
+ redirect: community.grafana.grafana_dashboard
+ grafana_datasource:
+ redirect: community.grafana.grafana_datasource
+ grafana_plugin:
+ redirect: community.grafana.grafana_plugin
+ k8s_facts:
+ redirect: kubernetes.core.k8s_facts
+ k8s_raw:
+ redirect: kubernetes.core.k8s_raw
+ k8s:
+ redirect: kubernetes.core.k8s
+ k8s_auth:
+ redirect: kubernetes.core.k8s_auth
+ k8s_info:
+ redirect: kubernetes.core.k8s_info
+ k8s_scale:
+ redirect: kubernetes.core.k8s_scale
+ k8s_service:
+ redirect: kubernetes.core.k8s_service
+ openshift_raw:
+ redirect: kubernetes.core.openshift_raw
+ openshift_scale:
+ redirect: kubernetes.core.openshift_scale
+ openssh_cert:
+ redirect: community.crypto.openssh_cert
+ openssl_pkcs12:
+ redirect: community.crypto.openssl_pkcs12
+ openssl_csr:
+ redirect: community.crypto.openssl_csr
+ openssl_certificate:
+ redirect: community.crypto.x509_certificate
+ openssl_certificate_info:
+ redirect: community.crypto.x509_certificate_info
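+ # Note: community.crypto renamed openssl_certificate to x509_certificate,
+ # so the two redirects above carry a rename as well as the namespace move.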
+ x509_crl:
+ redirect: community.crypto.x509_crl
+ openssl_privatekey_info:
+ redirect: community.crypto.openssl_privatekey_info
+ x509_crl_info:
+ redirect: community.crypto.x509_crl_info
+ get_certificate:
+ redirect: community.crypto.get_certificate
+ openssh_keypair:
+ redirect: community.crypto.openssh_keypair
+ openssl_publickey:
+ redirect: community.crypto.openssl_publickey
+ openssl_csr_info:
+ redirect: community.crypto.openssl_csr_info
+ luks_device:
+ redirect: community.crypto.luks_device
+ openssl_dhparam:
+ redirect: community.crypto.openssl_dhparam
+ openssl_privatekey:
+ redirect: community.crypto.openssl_privatekey
+ certificate_complete_chain:
+ redirect: community.crypto.certificate_complete_chain
+ acme_inspect:
+ redirect: community.crypto.acme_inspect
+ acme_certificate_revoke:
+ redirect: community.crypto.acme_certificate_revoke
+ acme_certificate:
+ redirect: community.crypto.acme_certificate
+ acme_account:
+ redirect: community.crypto.acme_account
+ acme_account_facts:
+ redirect: community.crypto.acme_account_facts
+ acme_challenge_cert_helper:
+ redirect: community.crypto.acme_challenge_cert_helper
+ acme_account_info:
+ redirect: community.crypto.acme_account_info
+ ecs_domain:
+ redirect: community.crypto.ecs_domain
+ ecs_certificate:
+ redirect: community.crypto.ecs_certificate
+ mongodb_parameter:
+ redirect: community.mongodb.mongodb_parameter
+ mongodb_info:
+ redirect: community.mongodb.mongodb_info
+ mongodb_replicaset:
+ redirect: community.mongodb.mongodb_replicaset
+ mongodb_user:
+ redirect: community.mongodb.mongodb_user
+ mongodb_shard:
+ redirect: community.mongodb.mongodb_shard
+ vmware_appliance_access_info:
+ redirect: vmware.vmware_rest.vmware_appliance_access_info
+ vmware_appliance_health_info:
+ redirect: vmware.vmware_rest.vmware_appliance_health_info
+ vmware_cis_category_info:
+ redirect: vmware.vmware_rest.vmware_cis_category_info
+ vmware_core_info:
+ redirect: vmware.vmware_rest.vmware_core_info
+ vcenter_extension_facts:
+ redirect: community.vmware.vcenter_extension_facts
+ vmware_about_facts:
+ redirect: community.vmware.vmware_about_facts
+ vmware_category_facts:
+ redirect: community.vmware.vmware_category_facts
+ vmware_cluster_facts:
+ redirect: community.vmware.vmware_cluster_facts
+ vmware_datastore_facts:
+ redirect: community.vmware.vmware_datastore_facts
+ vmware_dns_config:
+ redirect: community.vmware.vmware_dns_config
+ vmware_drs_group_facts:
+ redirect: community.vmware.vmware_drs_group_facts
+ vmware_drs_rule_facts:
+ redirect: community.vmware.vmware_drs_rule_facts
+ vmware_dvs_portgroup_facts:
+ redirect: community.vmware.vmware_dvs_portgroup_facts
+ vmware_guest_boot_facts:
+ redirect: community.vmware.vmware_guest_boot_facts
+ vmware_guest_customization_facts:
+ redirect: community.vmware.vmware_guest_customization_facts
+ vmware_guest_disk_facts:
+ redirect: community.vmware.vmware_guest_disk_facts
+ vmware_guest_facts:
+ redirect: community.vmware.vmware_guest_facts
+ vmware_guest_snapshot_facts:
+ redirect: community.vmware.vmware_guest_snapshot_facts
+ vmware_host_capability_facts:
+ redirect: community.vmware.vmware_host_capability_facts
+ vmware_host_config_facts:
+ redirect: community.vmware.vmware_host_config_facts
+ vmware_host_dns_facts:
+ redirect: community.vmware.vmware_host_dns_facts
+ vmware_host_feature_facts:
+ redirect: community.vmware.vmware_host_feature_facts
+ vmware_host_firewall_facts:
+ redirect: community.vmware.vmware_host_firewall_facts
+ vmware_host_ntp_facts:
+ redirect: community.vmware.vmware_host_ntp_facts
+ vmware_host_package_facts:
+ redirect: community.vmware.vmware_host_package_facts
+ vmware_host_service_facts:
+ redirect: community.vmware.vmware_host_service_facts
+ vmware_host_ssl_facts:
+ redirect: community.vmware.vmware_host_ssl_facts
+ vmware_host_vmhba_facts:
+ redirect: community.vmware.vmware_host_vmhba_facts
+ vmware_host_vmnic_facts:
+ redirect: community.vmware.vmware_host_vmnic_facts
+ vmware_local_role_facts:
+ redirect: community.vmware.vmware_local_role_facts
+ vmware_local_user_facts:
+ redirect: community.vmware.vmware_local_user_facts
+ vmware_portgroup_facts:
+ redirect: community.vmware.vmware_portgroup_facts
+ vmware_resource_pool_facts:
+ redirect: community.vmware.vmware_resource_pool_facts
+ vmware_tag_facts:
+ redirect: community.vmware.vmware_tag_facts
+ vmware_target_canonical_facts:
+ redirect: community.vmware.vmware_target_canonical_facts
+ vmware_vm_facts:
+ redirect: community.vmware.vmware_vm_facts
+ vmware_vmkernel_facts:
+ redirect: community.vmware.vmware_vmkernel_facts
+ vmware_vswitch_facts:
+ redirect: community.vmware.vmware_vswitch_facts
+ vca_fw:
+ redirect: community.vmware.vca_fw
+ vca_nat:
+ redirect: community.vmware.vca_nat
+ vca_vapp:
+ redirect: community.vmware.vca_vapp
+ vcenter_extension:
+ redirect: community.vmware.vcenter_extension
+ vcenter_extension_info:
+ redirect: community.vmware.vcenter_extension_info
+ vcenter_folder:
+ redirect: community.vmware.vcenter_folder
+ vcenter_license:
+ redirect: community.vmware.vcenter_license
+ vmware_about_info:
+ redirect: community.vmware.vmware_about_info
+ vmware_category:
+ redirect: community.vmware.vmware_category
+ vmware_category_info:
+ redirect: community.vmware.vmware_category_info
+ vmware_cfg_backup:
+ redirect: community.vmware.vmware_cfg_backup
+ vmware_cluster:
+ redirect: community.vmware.vmware_cluster
+ vmware_cluster_drs:
+ redirect: community.vmware.vmware_cluster_drs
+ vmware_cluster_ha:
+ redirect: community.vmware.vmware_cluster_ha
+ vmware_cluster_info:
+ redirect: community.vmware.vmware_cluster_info
+ vmware_cluster_vsan:
+ redirect: community.vmware.vmware_cluster_vsan
+ vmware_content_deploy_template:
+ redirect: community.vmware.vmware_content_deploy_template
+ vmware_content_library_info:
+ redirect: community.vmware.vmware_content_library_info
+ vmware_content_library_manager:
+ redirect: community.vmware.vmware_content_library_manager
+ vmware_datacenter:
+ redirect: community.vmware.vmware_datacenter
+ vmware_datastore_cluster:
+ redirect: community.vmware.vmware_datastore_cluster
+ vmware_datastore_info:
+ redirect: community.vmware.vmware_datastore_info
+ vmware_datastore_maintenancemode:
+ redirect: community.vmware.vmware_datastore_maintenancemode
+ vmware_deploy_ovf:
+ redirect: community.vmware.vmware_deploy_ovf
+ vmware_drs_group:
+ redirect: community.vmware.vmware_drs_group
+ vmware_drs_group_info:
+ redirect: community.vmware.vmware_drs_group_info
+ vmware_drs_rule_info:
+ redirect: community.vmware.vmware_drs_rule_info
+ vmware_dvs_host:
+ redirect: community.vmware.vmware_dvs_host
+ vmware_dvs_portgroup:
+ redirect: community.vmware.vmware_dvs_portgroup
+ vmware_dvs_portgroup_find:
+ redirect: community.vmware.vmware_dvs_portgroup_find
+ vmware_dvs_portgroup_info:
+ redirect: community.vmware.vmware_dvs_portgroup_info
+ vmware_dvswitch:
+ redirect: community.vmware.vmware_dvswitch
+ vmware_dvswitch_lacp:
+ redirect: community.vmware.vmware_dvswitch_lacp
+ vmware_dvswitch_nioc:
+ redirect: community.vmware.vmware_dvswitch_nioc
+ vmware_dvswitch_pvlans:
+ redirect: community.vmware.vmware_dvswitch_pvlans
+ vmware_dvswitch_uplink_pg:
+ redirect: community.vmware.vmware_dvswitch_uplink_pg
+ vmware_evc_mode:
+ redirect: community.vmware.vmware_evc_mode
+ vmware_export_ovf:
+ redirect: community.vmware.vmware_export_ovf
+ vmware_folder_info:
+ redirect: community.vmware.vmware_folder_info
+ vmware_guest:
+ redirect: community.vmware.vmware_guest
+ vmware_guest_boot_info:
+ redirect: community.vmware.vmware_guest_boot_info
+ vmware_guest_boot_manager:
+ redirect: community.vmware.vmware_guest_boot_manager
+ vmware_guest_controller:
+ redirect: community.vmware.vmware_guest_controller
+ vmware_guest_cross_vc_clone:
+ redirect: community.vmware.vmware_guest_cross_vc_clone
+ vmware_guest_custom_attribute_defs:
+ redirect: community.vmware.vmware_guest_custom_attribute_defs
+ vmware_guest_custom_attributes:
+ redirect: community.vmware.vmware_guest_custom_attributes
+ vmware_guest_customization_info:
+ redirect: community.vmware.vmware_guest_customization_info
+ vmware_guest_disk:
+ redirect: community.vmware.vmware_guest_disk
+ vmware_guest_disk_info:
+ redirect: community.vmware.vmware_guest_disk_info
+ vmware_guest_file_operation:
+ redirect: community.vmware.vmware_guest_file_operation
+ vmware_guest_find:
+ redirect: community.vmware.vmware_guest_find
+ vmware_guest_info:
+ redirect: community.vmware.vmware_guest_info
+ vmware_guest_move:
+ redirect: community.vmware.vmware_guest_move
+ vmware_guest_network:
+ redirect: community.vmware.vmware_guest_network
+ vmware_guest_powerstate:
+ redirect: community.vmware.vmware_guest_powerstate
+ vmware_guest_register_operation:
+ redirect: community.vmware.vmware_guest_register_operation
+ vmware_guest_screenshot:
+ redirect: community.vmware.vmware_guest_screenshot
+ vmware_guest_sendkey:
+ redirect: community.vmware.vmware_guest_sendkey
+ vmware_guest_serial_port:
+ redirect: community.vmware.vmware_guest_serial_port
+ vmware_guest_snapshot:
+ redirect: community.vmware.vmware_guest_snapshot
+ vmware_guest_snapshot_info:
+ redirect: community.vmware.vmware_guest_snapshot_info
+ vmware_guest_tools_info:
+ redirect: community.vmware.vmware_guest_tools_info
+ vmware_guest_tools_upgrade:
+ redirect: community.vmware.vmware_guest_tools_upgrade
+ vmware_guest_tools_wait:
+ redirect: community.vmware.vmware_guest_tools_wait
+ vmware_guest_video:
+ redirect: community.vmware.vmware_guest_video
+ vmware_guest_vnc:
+ redirect: community.vmware.vmware_guest_vnc
+ vmware_host:
+ redirect: community.vmware.vmware_host
+ vmware_host_acceptance:
+ redirect: community.vmware.vmware_host_acceptance
+ vmware_host_active_directory:
+ redirect: community.vmware.vmware_host_active_directory
+ vmware_host_auto_start:
+ redirect: community.vmware.vmware_host_auto_start
+ vmware_host_capability_info:
+ redirect: community.vmware.vmware_host_capability_info
+ vmware_host_config_info:
+ redirect: community.vmware.vmware_host_config_info
+ vmware_host_config_manager:
+ redirect: community.vmware.vmware_host_config_manager
+ vmware_host_datastore:
+ redirect: community.vmware.vmware_host_datastore
+ vmware_host_dns:
+ redirect: community.vmware.vmware_host_dns
+ vmware_host_dns_info:
+ redirect: community.vmware.vmware_host_dns_info
+ vmware_host_facts:
+ redirect: community.vmware.vmware_host_facts
+ vmware_host_feature_info:
+ redirect: community.vmware.vmware_host_feature_info
+ vmware_host_firewall_info:
+ redirect: community.vmware.vmware_host_firewall_info
+ vmware_host_firewall_manager:
+ redirect: community.vmware.vmware_host_firewall_manager
+ vmware_host_hyperthreading:
+ redirect: community.vmware.vmware_host_hyperthreading
+ vmware_host_ipv6:
+ redirect: community.vmware.vmware_host_ipv6
+ vmware_host_kernel_manager:
+ redirect: community.vmware.vmware_host_kernel_manager
+ vmware_host_lockdown:
+ redirect: community.vmware.vmware_host_lockdown
+ vmware_host_ntp:
+ redirect: community.vmware.vmware_host_ntp
+ vmware_host_ntp_info:
+ redirect: community.vmware.vmware_host_ntp_info
+ vmware_host_package_info:
+ redirect: community.vmware.vmware_host_package_info
+ vmware_host_powermgmt_policy:
+ redirect: community.vmware.vmware_host_powermgmt_policy
+ vmware_host_powerstate:
+ redirect: community.vmware.vmware_host_powerstate
+ vmware_host_scanhba:
+ redirect: community.vmware.vmware_host_scanhba
+ vmware_host_service_info:
+ redirect: community.vmware.vmware_host_service_info
+ vmware_host_service_manager:
+ redirect: community.vmware.vmware_host_service_manager
+ vmware_host_snmp:
+ redirect: community.vmware.vmware_host_snmp
+ vmware_host_ssl_info:
+ redirect: community.vmware.vmware_host_ssl_info
+ vmware_host_vmhba_info:
+ redirect: community.vmware.vmware_host_vmhba_info
+ vmware_host_vmnic_info:
+ redirect: community.vmware.vmware_host_vmnic_info
+ vmware_local_role_info:
+ redirect: community.vmware.vmware_local_role_info
+ vmware_local_role_manager:
+ redirect: community.vmware.vmware_local_role_manager
+ vmware_local_user_info:
+ redirect: community.vmware.vmware_local_user_info
+ vmware_local_user_manager:
+ redirect: community.vmware.vmware_local_user_manager
+ vmware_maintenancemode:
+ redirect: community.vmware.vmware_maintenancemode
+ vmware_migrate_vmk:
+ redirect: community.vmware.vmware_migrate_vmk
+ vmware_object_role_permission:
+ redirect: community.vmware.vmware_object_role_permission
+ vmware_portgroup:
+ redirect: community.vmware.vmware_portgroup
+ vmware_portgroup_info:
+ redirect: community.vmware.vmware_portgroup_info
+ vmware_resource_pool:
+ redirect: community.vmware.vmware_resource_pool
+ vmware_resource_pool_info:
+ redirect: community.vmware.vmware_resource_pool_info
+ vmware_tag:
+ redirect: community.vmware.vmware_tag
+ vmware_tag_info:
+ redirect: community.vmware.vmware_tag_info
+ vmware_tag_manager:
+ redirect: community.vmware.vmware_tag_manager
+ vmware_target_canonical_info:
+ redirect: community.vmware.vmware_target_canonical_info
+ vmware_vcenter_settings:
+ redirect: community.vmware.vmware_vcenter_settings
+ vmware_vcenter_statistics:
+ redirect: community.vmware.vmware_vcenter_statistics
+ vmware_vm_host_drs_rule:
+ redirect: community.vmware.vmware_vm_host_drs_rule
+ vmware_vm_info:
+ redirect: community.vmware.vmware_vm_info
+ vmware_vm_shell:
+ redirect: community.vmware.vmware_vm_shell
+ vmware_vm_storage_policy_info:
+ redirect: community.vmware.vmware_vm_storage_policy_info
+ vmware_vm_vm_drs_rule:
+ redirect: community.vmware.vmware_vm_vm_drs_rule
+ vmware_vm_vss_dvs_migrate:
+ redirect: community.vmware.vmware_vm_vss_dvs_migrate
+ vmware_vmkernel:
+ redirect: community.vmware.vmware_vmkernel
+ vmware_vmkernel_info:
+ redirect: community.vmware.vmware_vmkernel_info
+ vmware_vmkernel_ip_config:
+ redirect: community.vmware.vmware_vmkernel_ip_config
+ vmware_vmotion:
+ redirect: community.vmware.vmware_vmotion
+ vmware_vsan_cluster:
+ redirect: community.vmware.vmware_vsan_cluster
+ vmware_vsan_health_info:
+ redirect: community.vmware.vmware_vsan_health_info
+ vmware_vspan_session:
+ redirect: community.vmware.vmware_vspan_session
+ vmware_vswitch:
+ redirect: community.vmware.vmware_vswitch
+ vmware_vswitch_info:
+ redirect: community.vmware.vmware_vswitch_info
+ vsphere_copy:
+ redirect: community.vmware.vsphere_copy
+ vsphere_file:
+ redirect: community.vmware.vsphere_file
+ psexec:
+ redirect: community.windows.psexec
+ win_audit_policy_system:
+ redirect: community.windows.win_audit_policy_system
+ win_audit_rule:
+ redirect: community.windows.win_audit_rule
+ win_chocolatey:
+ redirect: chocolatey.chocolatey.win_chocolatey
+ win_chocolatey_config:
+ redirect: chocolatey.chocolatey.win_chocolatey_config
+ win_chocolatey_facts:
+ redirect: chocolatey.chocolatey.win_chocolatey_facts
+ win_chocolatey_feature:
+ redirect: chocolatey.chocolatey.win_chocolatey_feature
+ win_chocolatey_source:
+ redirect: chocolatey.chocolatey.win_chocolatey_source
+ win_credential:
+ redirect: community.windows.win_credential
+ win_defrag:
+ redirect: community.windows.win_defrag
+ win_disk_facts:
+ redirect: community.windows.win_disk_facts
+ win_disk_image:
+ redirect: community.windows.win_disk_image
+ win_dns_record:
+ redirect: community.windows.win_dns_record
+ win_domain_computer:
+ redirect: community.windows.win_domain_computer
+ win_domain_group:
+ redirect: community.windows.win_domain_group
+ win_domain_group_membership:
+ redirect: community.windows.win_domain_group_membership
+ win_domain_user:
+ redirect: community.windows.win_domain_user
+ win_dotnet_ngen:
+ redirect: community.windows.win_dotnet_ngen
+ win_eventlog:
+ redirect: community.windows.win_eventlog
+ win_eventlog_entry:
+ redirect: community.windows.win_eventlog_entry
+ win_file_version:
+ redirect: community.windows.win_file_version
+ win_firewall:
+ redirect: community.windows.win_firewall
+ win_firewall_rule:
+ redirect: community.windows.win_firewall_rule
+ win_format:
+ redirect: community.windows.win_format
+ win_hosts:
+ redirect: community.windows.win_hosts
+ win_hotfix:
+ redirect: community.windows.win_hotfix
+ win_http_proxy:
+ redirect: community.windows.win_http_proxy
+ win_iis_virtualdirectory:
+ redirect: community.windows.win_iis_virtualdirectory
+ win_iis_webapplication:
+ redirect: community.windows.win_iis_webapplication
+ win_iis_webapppool:
+ redirect: community.windows.win_iis_webapppool
+ win_iis_webbinding:
+ redirect: community.windows.win_iis_webbinding
+ win_iis_website:
+ redirect: community.windows.win_iis_website
+ win_inet_proxy:
+ redirect: community.windows.win_inet_proxy
+ win_initialize_disk:
+ redirect: community.windows.win_initialize_disk
+ win_lineinfile:
+ redirect: community.windows.win_lineinfile
+ win_mapped_drive:
+ redirect: community.windows.win_mapped_drive
+ win_msg:
+ redirect: community.windows.win_msg
+ win_netbios:
+ redirect: community.windows.win_netbios
+ win_nssm:
+ redirect: community.windows.win_nssm
+ win_pagefile:
+ redirect: community.windows.win_pagefile
+ win_partition:
+ redirect: community.windows.win_partition
+ win_pester:
+ redirect: community.windows.win_pester
+ win_power_plan:
+ redirect: community.windows.win_power_plan
+ win_product_facts:
+ redirect: community.windows.win_product_facts
+ win_psexec:
+ redirect: community.windows.win_psexec
+ win_psmodule:
+ redirect: community.windows.win_psmodule
+ win_psrepository:
+ redirect: community.windows.win_psrepository
+ win_rabbitmq_plugin:
+ redirect: community.windows.win_rabbitmq_plugin
+ win_rds_cap:
+ redirect: community.windows.win_rds_cap
+ win_rds_rap:
+ redirect: community.windows.win_rds_rap
+ win_rds_settings:
+ redirect: community.windows.win_rds_settings
+ win_region:
+ redirect: community.windows.win_region
+ win_regmerge:
+ redirect: community.windows.win_regmerge
+ win_robocopy:
+ redirect: community.windows.win_robocopy
+ win_route:
+ redirect: community.windows.win_route
+ win_say:
+ redirect: community.windows.win_say
+ win_scheduled_task:
+ redirect: community.windows.win_scheduled_task
+ win_scheduled_task_stat:
+ redirect: community.windows.win_scheduled_task_stat
+ win_security_policy:
+ redirect: community.windows.win_security_policy
+ win_shortcut:
+ redirect: community.windows.win_shortcut
+ win_snmp:
+ redirect: community.windows.win_snmp
+ win_timezone:
+ redirect: community.windows.win_timezone
+ win_toast:
+ redirect: community.windows.win_toast
+ win_unzip:
+ redirect: community.windows.win_unzip
+ win_user_profile:
+ redirect: community.windows.win_user_profile
+ win_wait_for_process:
+ redirect: community.windows.win_wait_for_process
+ win_wakeonlan:
+ redirect: community.windows.win_wakeonlan
+ win_webpicmd:
+ redirect: community.windows.win_webpicmd
+ win_xml:
+ redirect: community.windows.win_xml
+ azure_rm_aks_facts:
+ redirect: community.azure.azure_rm_aks_facts
+ azure_rm_dnsrecordset_facts:
+ redirect: community.azure.azure_rm_dnsrecordset_facts
+ azure_rm_dnszone_facts:
+ redirect: community.azure.azure_rm_dnszone_facts
+ azure_rm_networkinterface_facts:
+ redirect: community.azure.azure_rm_networkinterface_facts
+ azure_rm_publicipaddress_facts:
+ redirect: community.azure.azure_rm_publicipaddress_facts
+ azure_rm_securitygroup_facts:
+ redirect: community.azure.azure_rm_securitygroup_facts
+ azure_rm_storageaccount_facts:
+ redirect: community.azure.azure_rm_storageaccount_facts
+ azure_rm_virtualmachine_facts:
+ redirect: community.azure.azure_rm_virtualmachine_facts
+ azure_rm_virtualnetwork_facts:
+ redirect: community.azure.azure_rm_virtualnetwork_facts
+ azure_rm_roledefinition_facts:
+ redirect: community.azure.azure_rm_roledefinition_facts
+ azure_rm_autoscale_facts:
+ redirect: community.azure.azure_rm_autoscale_facts
+ azure_rm_mysqldatabase_facts:
+ redirect: community.azure.azure_rm_mysqldatabase_facts
+ azure_rm_devtestlabschedule_facts:
+ redirect: community.azure.azure_rm_devtestlabschedule_facts
+ azure_rm_virtualmachinescaleset_facts:
+ redirect: community.azure.azure_rm_virtualmachinescaleset_facts
+ azure_rm_devtestlabcustomimage_facts:
+ redirect: community.azure.azure_rm_devtestlabcustomimage_facts
+ azure_rm_cosmosdbaccount_facts:
+ redirect: community.azure.azure_rm_cosmosdbaccount_facts
+ azure_rm_subnet_facts:
+ redirect: community.azure.azure_rm_subnet_facts
+ azure_rm_aksversion_facts:
+ redirect: community.azure.azure_rm_aksversion_facts
+ azure_rm_hdinsightcluster_facts:
+ redirect: community.azure.azure_rm_hdinsightcluster_facts
+ azure_rm_virtualmachinescalesetextension_facts:
+ redirect: community.azure.azure_rm_virtualmachinescalesetextension_facts
+ azure_rm_loadbalancer_facts:
+ redirect: community.azure.azure_rm_loadbalancer_facts
+ azure_rm_roleassignment_facts:
+ redirect: community.azure.azure_rm_roleassignment_facts
+ azure_rm_manageddisk_facts:
+ redirect: community.azure.azure_rm_manageddisk_facts
+ azure_rm_mysqlserver_facts:
+ redirect: community.azure.azure_rm_mysqlserver_facts
+ azure_rm_servicebus_facts:
+ redirect: community.azure.azure_rm_servicebus_facts
+ azure_rm_rediscache_facts:
+ redirect: community.azure.azure_rm_rediscache_facts
+ azure_rm_resource_facts:
+ redirect: community.azure.azure_rm_resource_facts
+ azure_rm_routetable_facts:
+ redirect: community.azure.azure_rm_routetable_facts
+ azure_rm_virtualmachine_extension:
+ redirect: community.azure.azure_rm_virtualmachine_extension
+ azure_rm_loganalyticsworkspace_facts:
+ redirect: community.azure.azure_rm_loganalyticsworkspace_facts
+ azure_rm_sqldatabase_facts:
+ redirect: community.azure.azure_rm_sqldatabase_facts
+ azure_rm_devtestlabartifactsource_facts:
+ redirect: community.azure.azure_rm_devtestlabartifactsource_facts
+ azure_rm_deployment_facts:
+ redirect: community.azure.azure_rm_deployment_facts
+ azure_rm_virtualmachineextension_facts:
+ redirect: community.azure.azure_rm_virtualmachineextension_facts
+ azure_rm_applicationsecuritygroup_facts:
+ redirect: community.azure.azure_rm_applicationsecuritygroup_facts
+ azure_rm_availabilityset_facts:
+ redirect: community.azure.azure_rm_availabilityset_facts
+ azure_rm_mariadbdatabase_facts:
+ redirect: community.azure.azure_rm_mariadbdatabase_facts
+ azure_rm_devtestlabenvironment_facts:
+ redirect: community.azure.azure_rm_devtestlabenvironment_facts
+ azure_rm_appserviceplan_facts:
+ redirect: community.azure.azure_rm_appserviceplan_facts
+ azure_rm_containerinstance_facts:
+ redirect: community.azure.azure_rm_containerinstance_facts
+ azure_rm_devtestlabarmtemplate_facts:
+ redirect: community.azure.azure_rm_devtestlabarmtemplate_facts
+ azure_rm_devtestlabartifact_facts:
+ redirect: community.azure.azure_rm_devtestlabartifact_facts
+ azure_rm_virtualmachinescalesetinstance_facts:
+ redirect: community.azure.azure_rm_virtualmachinescalesetinstance_facts
+ azure_rm_cdnendpoint_facts:
+ redirect: community.azure.azure_rm_cdnendpoint_facts
+ azure_rm_trafficmanagerprofile_facts:
+ redirect: community.azure.azure_rm_trafficmanagerprofile_facts
+ azure_rm_functionapp_facts:
+ redirect: community.azure.azure_rm_functionapp_facts
+ azure_rm_virtualmachineimage_facts:
+ redirect: community.azure.azure_rm_virtualmachineimage_facts
+ azure_rm_mariadbconfiguration_facts:
+ redirect: community.azure.azure_rm_mariadbconfiguration_facts
+ azure_rm_virtualnetworkpeering_facts:
+ redirect: community.azure.azure_rm_virtualnetworkpeering_facts
+ azure_rm_sqlserver_facts:
+ redirect: community.azure.azure_rm_sqlserver_facts
+ azure_rm_mariadbfirewallrule_facts:
+ redirect: community.azure.azure_rm_mariadbfirewallrule_facts
+ azure_rm_mysqlconfiguration_facts:
+ redirect: community.azure.azure_rm_mysqlconfiguration_facts
+ azure_rm_mysqlfirewallrule_facts:
+ redirect: community.azure.azure_rm_mysqlfirewallrule_facts
+ azure_rm_postgresqlfirewallrule_facts:
+ redirect: community.azure.azure_rm_postgresqlfirewallrule_facts
+ azure_rm_mariadbserver_facts:
+ redirect: community.azure.azure_rm_mariadbserver_facts
+ azure_rm_postgresqldatabase_facts:
+ redirect: community.azure.azure_rm_postgresqldatabase_facts
+ azure_rm_devtestlabvirtualnetwork_facts:
+ redirect: community.azure.azure_rm_devtestlabvirtualnetwork_facts
+ azure_rm_devtestlabpolicy_facts:
+ redirect: community.azure.azure_rm_devtestlabpolicy_facts
+ azure_rm_trafficmanagerendpoint_facts:
+ redirect: community.azure.azure_rm_trafficmanagerendpoint_facts
+ azure_rm_sqlfirewallrule_facts:
+ redirect: community.azure.azure_rm_sqlfirewallrule_facts
+ azure_rm_containerregistry_facts:
+ redirect: community.azure.azure_rm_containerregistry_facts
+ azure_rm_postgresqlconfiguration_facts:
+ redirect: community.azure.azure_rm_postgresqlconfiguration_facts
+ azure_rm_postgresqlserver_facts:
+ redirect: community.azure.azure_rm_postgresqlserver_facts
+ azure_rm_devtestlab_facts:
+ redirect: community.azure.azure_rm_devtestlab_facts
+ azure_rm_cdnprofile_facts:
+ redirect: community.azure.azure_rm_cdnprofile_facts
+ azure_rm_virtualmachine_scaleset:
+ redirect: community.azure.azure_rm_virtualmachine_scaleset
+ azure_rm_webapp_facts:
+ redirect: community.azure.azure_rm_webapp_facts
+ azure_rm_devtestlabvirtualmachine_facts:
+ redirect: community.azure.azure_rm_devtestlabvirtualmachine_facts
+ azure_rm_image_facts:
+ redirect: community.azure.azure_rm_image_facts
+ azure_rm_managed_disk:
+ redirect: community.azure.azure_rm_managed_disk
+ azure_rm_automationaccount_facts:
+ redirect: community.azure.azure_rm_automationaccount_facts
+ azure_rm_lock_facts:
+ redirect: community.azure.azure_rm_lock_facts
+ azure_rm_managed_disk_facts:
+ redirect: community.azure.azure_rm_managed_disk_facts
+ azure_rm_resourcegroup_facts:
+ redirect: community.azure.azure_rm_resourcegroup_facts
+ azure_rm_virtualmachine_scaleset_facts:
+ redirect: community.azure.azure_rm_virtualmachine_scaleset_facts
+ snow_record:
+ redirect: servicenow.servicenow.snow_record
+ snow_record_find:
+ redirect: servicenow.servicenow.snow_record_find
+ aws_az_facts:
+ redirect: amazon.aws.aws_az_facts
+ aws_caller_facts:
+ redirect: amazon.aws.aws_caller_facts
+ cloudformation_facts:
+ redirect: amazon.aws.cloudformation_facts
+ ec2_ami_facts:
+ redirect: amazon.aws.ec2_ami_facts
+ ec2_eni_facts:
+ redirect: amazon.aws.ec2_eni_facts
+ ec2_group_facts:
+ redirect: amazon.aws.ec2_group_facts
+ ec2_snapshot_facts:
+ redirect: amazon.aws.ec2_snapshot_facts
+ ec2_vol_facts:
+ redirect: amazon.aws.ec2_vol_facts
+ ec2_vpc_dhcp_option_facts:
+ redirect: amazon.aws.ec2_vpc_dhcp_option_facts
+ ec2_vpc_net_facts:
+ redirect: amazon.aws.ec2_vpc_net_facts
+ ec2_vpc_subnet_facts:
+ redirect: amazon.aws.ec2_vpc_subnet_facts
+ aws_az_info:
+ redirect: amazon.aws.aws_az_info
+ aws_caller_info:
+ redirect: amazon.aws.aws_caller_info
+ aws_s3:
+ redirect: amazon.aws.aws_s3
+ cloudformation:
+ redirect: amazon.aws.cloudformation
+ cloudformation_info:
+ redirect: amazon.aws.cloudformation_info
+ ec2:
+ redirect: amazon.aws.ec2
+ ec2_ami:
+ redirect: amazon.aws.ec2_ami
+ ec2_ami_info:
+ redirect: amazon.aws.ec2_ami_info
+ ec2_elb_lb:
+ redirect: amazon.aws.ec2_elb_lb
+ ec2_eni:
+ redirect: amazon.aws.ec2_eni
+ ec2_eni_info:
+ redirect: amazon.aws.ec2_eni_info
+ ec2_group:
+ redirect: amazon.aws.ec2_group
+ ec2_group_info:
+ redirect: amazon.aws.ec2_group_info
+ ec2_key:
+ redirect: amazon.aws.ec2_key
+ ec2_metadata_facts:
+ redirect: amazon.aws.ec2_metadata_facts
+ ec2_snapshot:
+ redirect: amazon.aws.ec2_snapshot
+ ec2_snapshot_info:
+ redirect: amazon.aws.ec2_snapshot_info
+ ec2_tag:
+ redirect: amazon.aws.ec2_tag
+ ec2_tag_info:
+ redirect: amazon.aws.ec2_tag_info
+ ec2_vol:
+ redirect: amazon.aws.ec2_vol
+ ec2_vol_info:
+ redirect: amazon.aws.ec2_vol_info
+ ec2_vpc_dhcp_option:
+ redirect: amazon.aws.ec2_vpc_dhcp_option
+ ec2_vpc_dhcp_option_info:
+ redirect: amazon.aws.ec2_vpc_dhcp_option_info
+ ec2_vpc_net:
+ redirect: amazon.aws.ec2_vpc_net
+ ec2_vpc_net_info:
+ redirect: amazon.aws.ec2_vpc_net_info
+ ec2_vpc_subnet:
+ redirect: amazon.aws.ec2_vpc_subnet
+ ec2_vpc_subnet_info:
+ redirect: amazon.aws.ec2_vpc_subnet_info
+ s3_bucket:
+ redirect: amazon.aws.s3_bucket
+ telnet:
+ redirect: ansible.netcommon.telnet
+ cli_command:
+ redirect: ansible.netcommon.cli_command
+ cli_config:
+ redirect: ansible.netcommon.cli_config
+ net_put:
+ redirect: ansible.netcommon.net_put
+ net_get:
+ redirect: ansible.netcommon.net_get
+ net_linkagg:
+ redirect: ansible.netcommon.net_linkagg
+ net_interface:
+ redirect: ansible.netcommon.net_interface
+ net_lldp_interface:
+ redirect: ansible.netcommon.net_lldp_interface
+ net_vlan:
+ redirect: ansible.netcommon.net_vlan
+ net_l2_interface:
+ redirect: ansible.netcommon.net_l2_interface
+ net_l3_interface:
+ redirect: ansible.netcommon.net_l3_interface
+ net_vrf:
+ redirect: ansible.netcommon.net_vrf
+ netconf_config:
+ redirect: ansible.netcommon.netconf_config
+ netconf_rpc:
+ redirect: ansible.netcommon.netconf_rpc
+ netconf_get:
+ redirect: ansible.netcommon.netconf_get
+ net_lldp:
+ redirect: ansible.netcommon.net_lldp
+ restconf_get:
+ redirect: ansible.netcommon.restconf_get
+ restconf_config:
+ redirect: ansible.netcommon.restconf_config
+ net_static_route:
+ redirect: ansible.netcommon.net_static_route
+ net_system:
+ redirect: ansible.netcommon.net_system
+ net_logging:
+ redirect: ansible.netcommon.net_logging
+ net_user:
+ redirect: ansible.netcommon.net_user
+ net_ping:
+ redirect: ansible.netcommon.net_ping
+ net_banner:
+ redirect: ansible.netcommon.net_banner
+ acl:
+ redirect: ansible.posix.acl
+ synchronize:
+ redirect: ansible.posix.synchronize
+ at:
+ redirect: ansible.posix.at
+ authorized_key:
+ redirect: ansible.posix.authorized_key
+ mount:
+ redirect: ansible.posix.mount
+ seboolean:
+ redirect: ansible.posix.seboolean
+ selinux:
+ redirect: ansible.posix.selinux
+ sysctl:
+ redirect: ansible.posix.sysctl
+ async_status.ps1:
+ redirect: ansible.windows.async_status
+ setup.ps1:
+ redirect: ansible.windows.setup
+ slurp.ps1:
+ redirect: ansible.windows.slurp
+ win_acl:
+ redirect: ansible.windows.win_acl
+ win_acl_inheritance:
+ redirect: ansible.windows.win_acl_inheritance
+ win_certificate_store:
+ redirect: ansible.windows.win_certificate_store
+ win_command:
+ redirect: ansible.windows.win_command
+ win_copy:
+ redirect: ansible.windows.win_copy
+ win_dns_client:
+ redirect: ansible.windows.win_dns_client
+ win_domain:
+ redirect: ansible.windows.win_domain
+ win_domain_controller:
+ redirect: ansible.windows.win_domain_controller
+ win_domain_membership:
+ redirect: ansible.windows.win_domain_membership
+ win_dsc:
+ redirect: ansible.windows.win_dsc
+ win_environment:
+ redirect: ansible.windows.win_environment
+ win_feature:
+ redirect: ansible.windows.win_feature
+ win_file:
+ redirect: ansible.windows.win_file
+ win_find:
+ redirect: ansible.windows.win_find
+ win_get_url:
+ redirect: ansible.windows.win_get_url
+ win_group:
+ redirect: ansible.windows.win_group
+ win_group_membership:
+ redirect: ansible.windows.win_group_membership
+ win_hostname:
+ redirect: ansible.windows.win_hostname
+ win_optional_feature:
+ redirect: ansible.windows.win_optional_feature
+ win_owner:
+ redirect: ansible.windows.win_owner
+ win_package:
+ redirect: ansible.windows.win_package
+ win_path:
+ redirect: ansible.windows.win_path
+ win_ping:
+ redirect: ansible.windows.win_ping
+ win_reboot:
+ redirect: ansible.windows.win_reboot
+ win_reg_stat:
+ redirect: ansible.windows.win_reg_stat
+ win_regedit:
+ redirect: ansible.windows.win_regedit
+ win_service:
+ redirect: ansible.windows.win_service
+ win_share:
+ redirect: ansible.windows.win_share
+ win_shell:
+ redirect: ansible.windows.win_shell
+ win_stat:
+ redirect: ansible.windows.win_stat
+ win_tempfile:
+ redirect: ansible.windows.win_tempfile
+ win_template:
+ redirect: ansible.windows.win_template
+ win_updates:
+ redirect: ansible.windows.win_updates
+ win_uri:
+ redirect: ansible.windows.win_uri
+ win_user:
+ redirect: ansible.windows.win_user
+ win_user_right:
+ redirect: ansible.windows.win_user_right
+ win_wait_for:
+ redirect: ansible.windows.win_wait_for
+ win_whoami:
+ redirect: ansible.windows.win_whoami
+ fortios_address:
+ redirect: fortinet.fortios.fortios_address
+ fortios_alertemail_setting:
+ redirect: fortinet.fortios.fortios_alertemail_setting
+ fortios_antivirus_heuristic:
+ redirect: fortinet.fortios.fortios_antivirus_heuristic
+ fortios_antivirus_profile:
+ redirect: fortinet.fortios.fortios_antivirus_profile
+ fortios_antivirus_quarantine:
+ redirect: fortinet.fortios.fortios_antivirus_quarantine
+ fortios_antivirus_settings:
+ redirect: fortinet.fortios.fortios_antivirus_settings
+ fortios_application_custom:
+ redirect: fortinet.fortios.fortios_application_custom
+ fortios_application_group:
+ redirect: fortinet.fortios.fortios_application_group
+ fortios_application_list:
+ redirect: fortinet.fortios.fortios_application_list
+ fortios_application_name:
+ redirect: fortinet.fortios.fortios_application_name
+ fortios_application_rule_settings:
+ redirect: fortinet.fortios.fortios_application_rule_settings
+ fortios_authentication_rule:
+ redirect: fortinet.fortios.fortios_authentication_rule
+ fortios_authentication_scheme:
+ redirect: fortinet.fortios.fortios_authentication_scheme
+ fortios_authentication_setting:
+ redirect: fortinet.fortios.fortios_authentication_setting
+ fortios_config:
+ redirect: fortinet.fortios.fortios_config
+ fortios_dlp_filepattern:
+ redirect: fortinet.fortios.fortios_dlp_filepattern
+ fortios_dlp_fp_doc_source:
+ redirect: fortinet.fortios.fortios_dlp_fp_doc_source
+ fortios_dlp_fp_sensitivity:
+ redirect: fortinet.fortios.fortios_dlp_fp_sensitivity
+ fortios_dlp_sensor:
+ redirect: fortinet.fortios.fortios_dlp_sensor
+ fortios_dlp_settings:
+ redirect: fortinet.fortios.fortios_dlp_settings
+ fortios_dnsfilter_domain_filter:
+ redirect: fortinet.fortios.fortios_dnsfilter_domain_filter
+ fortios_dnsfilter_profile:
+ redirect: fortinet.fortios.fortios_dnsfilter_profile
+ fortios_endpoint_control_client:
+ redirect: fortinet.fortios.fortios_endpoint_control_client
+ fortios_endpoint_control_forticlient_ems:
+ redirect: fortinet.fortios.fortios_endpoint_control_forticlient_ems
+ fortios_endpoint_control_forticlient_registration_sync:
+ redirect: fortinet.fortios.fortios_endpoint_control_forticlient_registration_sync
+ fortios_endpoint_control_profile:
+ redirect: fortinet.fortios.fortios_endpoint_control_profile
+ fortios_endpoint_control_settings:
+ redirect: fortinet.fortios.fortios_endpoint_control_settings
+ fortios_extender_controller_extender:
+ redirect: fortinet.fortios.fortios_extender_controller_extender
+ fortios_facts:
+ redirect: fortinet.fortios.fortios_facts
+ fortios_firewall_address:
+ redirect: fortinet.fortios.fortios_firewall_address
+ fortios_firewall_address6:
+ redirect: fortinet.fortios.fortios_firewall_address6
+ fortios_firewall_address6_template:
+ redirect: fortinet.fortios.fortios_firewall_address6_template
+ fortios_firewall_addrgrp:
+ redirect: fortinet.fortios.fortios_firewall_addrgrp
+ fortios_firewall_addrgrp6:
+ redirect: fortinet.fortios.fortios_firewall_addrgrp6
+ fortios_firewall_auth_portal:
+ redirect: fortinet.fortios.fortios_firewall_auth_portal
+ fortios_firewall_central_snat_map:
+ redirect: fortinet.fortios.fortios_firewall_central_snat_map
+ fortios_firewall_DoS_policy:
+ redirect: fortinet.fortios.fortios_firewall_DoS_policy
+ fortios_firewall_DoS_policy6:
+ redirect: fortinet.fortios.fortios_firewall_DoS_policy6
+ fortios_firewall_dnstranslation:
+ redirect: fortinet.fortios.fortios_firewall_dnstranslation
+ fortios_firewall_identity_based_route:
+ redirect: fortinet.fortios.fortios_firewall_identity_based_route
+ fortios_firewall_interface_policy:
+ redirect: fortinet.fortios.fortios_firewall_interface_policy
+ fortios_firewall_interface_policy6:
+ redirect: fortinet.fortios.fortios_firewall_interface_policy6
+ fortios_firewall_internet_service:
+ redirect: fortinet.fortios.fortios_firewall_internet_service
+ fortios_firewall_internet_service_custom:
+ redirect: fortinet.fortios.fortios_firewall_internet_service_custom
+ fortios_firewall_internet_service_group:
+ redirect: fortinet.fortios.fortios_firewall_internet_service_group
+ fortios_firewall_ip_translation:
+ redirect: fortinet.fortios.fortios_firewall_ip_translation
+ fortios_firewall_ipmacbinding_setting:
+ redirect: fortinet.fortios.fortios_firewall_ipmacbinding_setting
+ fortios_firewall_ipmacbinding_table:
+ redirect: fortinet.fortios.fortios_firewall_ipmacbinding_table
+ fortios_firewall_ippool:
+ redirect: fortinet.fortios.fortios_firewall_ippool
+ fortios_firewall_ippool6:
+ redirect: fortinet.fortios.fortios_firewall_ippool6
+ fortios_firewall_ipv6_eh_filter:
+ redirect: fortinet.fortios.fortios_firewall_ipv6_eh_filter
+ fortios_firewall_ldb_monitor:
+ redirect: fortinet.fortios.fortios_firewall_ldb_monitor
+ fortios_firewall_local_in_policy:
+ redirect: fortinet.fortios.fortios_firewall_local_in_policy
+ fortios_firewall_local_in_policy6:
+ redirect: fortinet.fortios.fortios_firewall_local_in_policy6
+ fortios_firewall_multicast_address:
+ redirect: fortinet.fortios.fortios_firewall_multicast_address
+ fortios_firewall_multicast_address6:
+ redirect: fortinet.fortios.fortios_firewall_multicast_address6
+ fortios_firewall_multicast_policy:
+ redirect: fortinet.fortios.fortios_firewall_multicast_policy
+ fortios_firewall_multicast_policy6:
+ redirect: fortinet.fortios.fortios_firewall_multicast_policy6
+ fortios_firewall_policy:
+ redirect: fortinet.fortios.fortios_firewall_policy
+ fortios_firewall_policy46:
+ redirect: fortinet.fortios.fortios_firewall_policy46
+ fortios_firewall_policy6:
+ redirect: fortinet.fortios.fortios_firewall_policy6
+ fortios_firewall_policy64:
+ redirect: fortinet.fortios.fortios_firewall_policy64
+ fortios_firewall_profile_group:
+ redirect: fortinet.fortios.fortios_firewall_profile_group
+ fortios_firewall_profile_protocol_options:
+ redirect: fortinet.fortios.fortios_firewall_profile_protocol_options
+ fortios_firewall_proxy_address:
+ redirect: fortinet.fortios.fortios_firewall_proxy_address
+ fortios_firewall_proxy_addrgrp:
+ redirect: fortinet.fortios.fortios_firewall_proxy_addrgrp
+ fortios_firewall_proxy_policy:
+ redirect: fortinet.fortios.fortios_firewall_proxy_policy
+ fortios_firewall_schedule_group:
+ redirect: fortinet.fortios.fortios_firewall_schedule_group
+ fortios_firewall_schedule_onetime:
+ redirect: fortinet.fortios.fortios_firewall_schedule_onetime
+ fortios_firewall_schedule_recurring:
+ redirect: fortinet.fortios.fortios_firewall_schedule_recurring
+ fortios_firewall_service_category:
+ redirect: fortinet.fortios.fortios_firewall_service_category
+ fortios_firewall_service_custom:
+ redirect: fortinet.fortios.fortios_firewall_service_custom
+ fortios_firewall_service_group:
+ redirect: fortinet.fortios.fortios_firewall_service_group
+ fortios_firewall_shaper_per_ip_shaper:
+ redirect: fortinet.fortios.fortios_firewall_shaper_per_ip_shaper
+ fortios_firewall_shaper_traffic_shaper:
+ redirect: fortinet.fortios.fortios_firewall_shaper_traffic_shaper
+ fortios_firewall_shaping_policy:
+ redirect: fortinet.fortios.fortios_firewall_shaping_policy
+ fortios_firewall_shaping_profile:
+ redirect: fortinet.fortios.fortios_firewall_shaping_profile
+ fortios_firewall_sniffer:
+ redirect: fortinet.fortios.fortios_firewall_sniffer
+ fortios_firewall_ssh_host_key:
+ redirect: fortinet.fortios.fortios_firewall_ssh_host_key
+ fortios_firewall_ssh_local_ca:
+ redirect: fortinet.fortios.fortios_firewall_ssh_local_ca
+ fortios_firewall_ssh_local_key:
+ redirect: fortinet.fortios.fortios_firewall_ssh_local_key
+ fortios_firewall_ssh_setting:
+ redirect: fortinet.fortios.fortios_firewall_ssh_setting
+ fortios_firewall_ssl_server:
+ redirect: fortinet.fortios.fortios_firewall_ssl_server
+ fortios_firewall_ssl_setting:
+ redirect: fortinet.fortios.fortios_firewall_ssl_setting
+ fortios_firewall_ssl_ssh_profile:
+ redirect: fortinet.fortios.fortios_firewall_ssl_ssh_profile
+ fortios_firewall_ttl_policy:
+ redirect: fortinet.fortios.fortios_firewall_ttl_policy
+ fortios_firewall_vip:
+ redirect: fortinet.fortios.fortios_firewall_vip
+ fortios_firewall_vip46:
+ redirect: fortinet.fortios.fortios_firewall_vip46
+ fortios_firewall_vip6:
+ redirect: fortinet.fortios.fortios_firewall_vip6
+ fortios_firewall_vip64:
+ redirect: fortinet.fortios.fortios_firewall_vip64
+ fortios_firewall_vipgrp:
+ redirect: fortinet.fortios.fortios_firewall_vipgrp
+ fortios_firewall_vipgrp46:
+ redirect: fortinet.fortios.fortios_firewall_vipgrp46
+ fortios_firewall_vipgrp6:
+ redirect: fortinet.fortios.fortios_firewall_vipgrp6
+ fortios_firewall_vipgrp64:
+ redirect: fortinet.fortios.fortios_firewall_vipgrp64
+ fortios_firewall_wildcard_fqdn_custom:
+ redirect: fortinet.fortios.fortios_firewall_wildcard_fqdn_custom
+ fortios_firewall_wildcard_fqdn_group:
+ redirect: fortinet.fortios.fortios_firewall_wildcard_fqdn_group
+ fortios_ftp_proxy_explicit:
+ redirect: fortinet.fortios.fortios_ftp_proxy_explicit
+ fortios_icap_profile:
+ redirect: fortinet.fortios.fortios_icap_profile
+ fortios_icap_server:
+ redirect: fortinet.fortios.fortios_icap_server
+ fortios_ips_custom:
+ redirect: fortinet.fortios.fortios_ips_custom
+ fortios_ips_decoder:
+ redirect: fortinet.fortios.fortios_ips_decoder
+ fortios_ips_global:
+ redirect: fortinet.fortios.fortios_ips_global
+ fortios_ips_rule:
+ redirect: fortinet.fortios.fortios_ips_rule
+ fortios_ips_rule_settings:
+ redirect: fortinet.fortios.fortios_ips_rule_settings
+ fortios_ips_sensor:
+ redirect: fortinet.fortios.fortios_ips_sensor
+ fortios_ips_settings:
+ redirect: fortinet.fortios.fortios_ips_settings
+ fortios_ipv4_policy:
+ redirect: fortinet.fortios.fortios_ipv4_policy
+ fortios_log_custom_field:
+ redirect: fortinet.fortios.fortios_log_custom_field
+ fortios_log_disk_filter:
+ redirect: fortinet.fortios.fortios_log_disk_filter
+ fortios_log_disk_setting:
+ redirect: fortinet.fortios.fortios_log_disk_setting
+ fortios_log_eventfilter:
+ redirect: fortinet.fortios.fortios_log_eventfilter
+ fortios_log_fortianalyzer2_filter:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer2_filter
+ fortios_log_fortianalyzer2_setting:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer2_setting
+ fortios_log_fortianalyzer3_filter:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer3_filter
+ fortios_log_fortianalyzer3_setting:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer3_setting
+ fortios_log_fortianalyzer_filter:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer_filter
+ fortios_log_fortianalyzer_override_filter:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer_override_filter
+ fortios_log_fortianalyzer_override_setting:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer_override_setting
+ fortios_log_fortianalyzer_setting:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer_setting
+ fortios_log_fortiguard_filter:
+ redirect: fortinet.fortios.fortios_log_fortiguard_filter
+ fortios_log_fortiguard_override_filter:
+ redirect: fortinet.fortios.fortios_log_fortiguard_override_filter
+ fortios_log_fortiguard_override_setting:
+ redirect: fortinet.fortios.fortios_log_fortiguard_override_setting
+ fortios_log_fortiguard_setting:
+ redirect: fortinet.fortios.fortios_log_fortiguard_setting
+ fortios_log_gui_display:
+ redirect: fortinet.fortios.fortios_log_gui_display
+ fortios_log_memory_filter:
+ redirect: fortinet.fortios.fortios_log_memory_filter
+ fortios_log_memory_global_setting:
+ redirect: fortinet.fortios.fortios_log_memory_global_setting
+ fortios_log_memory_setting:
+ redirect: fortinet.fortios.fortios_log_memory_setting
+ fortios_log_null_device_filter:
+ redirect: fortinet.fortios.fortios_log_null_device_filter
+ fortios_log_null_device_setting:
+ redirect: fortinet.fortios.fortios_log_null_device_setting
+ fortios_log_setting:
+ redirect: fortinet.fortios.fortios_log_setting
+ fortios_log_syslogd2_filter:
+ redirect: fortinet.fortios.fortios_log_syslogd2_filter
+ fortios_log_syslogd2_setting:
+ redirect: fortinet.fortios.fortios_log_syslogd2_setting
+ fortios_log_syslogd3_filter:
+ redirect: fortinet.fortios.fortios_log_syslogd3_filter
+ fortios_log_syslogd3_setting:
+ redirect: fortinet.fortios.fortios_log_syslogd3_setting
+ fortios_log_syslogd4_filter:
+ redirect: fortinet.fortios.fortios_log_syslogd4_filter
+ fortios_log_syslogd4_setting:
+ redirect: fortinet.fortios.fortios_log_syslogd4_setting
+ fortios_log_syslogd_filter:
+ redirect: fortinet.fortios.fortios_log_syslogd_filter
+ fortios_log_syslogd_override_filter:
+ redirect: fortinet.fortios.fortios_log_syslogd_override_filter
+ fortios_log_syslogd_override_setting:
+ redirect: fortinet.fortios.fortios_log_syslogd_override_setting
+ fortios_log_syslogd_setting:
+ redirect: fortinet.fortios.fortios_log_syslogd_setting
+ fortios_log_threat_weight:
+ redirect: fortinet.fortios.fortios_log_threat_weight
+ fortios_log_webtrends_filter:
+ redirect: fortinet.fortios.fortios_log_webtrends_filter
+ fortios_log_webtrends_setting:
+ redirect: fortinet.fortios.fortios_log_webtrends_setting
+ fortios_report_chart:
+ redirect: fortinet.fortios.fortios_report_chart
+ fortios_report_dataset:
+ redirect: fortinet.fortios.fortios_report_dataset
+ fortios_report_layout:
+ redirect: fortinet.fortios.fortios_report_layout
+ fortios_report_setting:
+ redirect: fortinet.fortios.fortios_report_setting
+ fortios_report_style:
+ redirect: fortinet.fortios.fortios_report_style
+ fortios_report_theme:
+ redirect: fortinet.fortios.fortios_report_theme
+ fortios_router_access_list:
+ redirect: fortinet.fortios.fortios_router_access_list
+ fortios_router_access_list6:
+ redirect: fortinet.fortios.fortios_router_access_list6
+ fortios_router_aspath_list:
+ redirect: fortinet.fortios.fortios_router_aspath_list
+ fortios_router_auth_path:
+ redirect: fortinet.fortios.fortios_router_auth_path
+ fortios_router_bfd:
+ redirect: fortinet.fortios.fortios_router_bfd
+ fortios_router_bfd6:
+ redirect: fortinet.fortios.fortios_router_bfd6
+ fortios_router_bgp:
+ redirect: fortinet.fortios.fortios_router_bgp
+ fortios_router_community_list:
+ redirect: fortinet.fortios.fortios_router_community_list
+ fortios_router_isis:
+ redirect: fortinet.fortios.fortios_router_isis
+ fortios_router_key_chain:
+ redirect: fortinet.fortios.fortios_router_key_chain
+ fortios_router_multicast:
+ redirect: fortinet.fortios.fortios_router_multicast
+ fortios_router_multicast6:
+ redirect: fortinet.fortios.fortios_router_multicast6
+ fortios_router_multicast_flow:
+ redirect: fortinet.fortios.fortios_router_multicast_flow
+ fortios_router_ospf:
+ redirect: fortinet.fortios.fortios_router_ospf
+ fortios_router_ospf6:
+ redirect: fortinet.fortios.fortios_router_ospf6
+ fortios_router_policy:
+ redirect: fortinet.fortios.fortios_router_policy
+ fortios_router_policy6:
+ redirect: fortinet.fortios.fortios_router_policy6
+ fortios_router_prefix_list:
+ redirect: fortinet.fortios.fortios_router_prefix_list
+ fortios_router_prefix_list6:
+ redirect: fortinet.fortios.fortios_router_prefix_list6
+ fortios_router_rip:
+ redirect: fortinet.fortios.fortios_router_rip
+ fortios_router_ripng:
+ redirect: fortinet.fortios.fortios_router_ripng
+ fortios_router_route_map:
+ redirect: fortinet.fortios.fortios_router_route_map
+ fortios_router_setting:
+ redirect: fortinet.fortios.fortios_router_setting
+ fortios_router_static:
+ redirect: fortinet.fortios.fortios_router_static
+ fortios_router_static6:
+ redirect: fortinet.fortios.fortios_router_static6
+ fortios_spamfilter_bwl:
+ redirect: fortinet.fortios.fortios_spamfilter_bwl
+ fortios_spamfilter_bword:
+ redirect: fortinet.fortios.fortios_spamfilter_bword
+ fortios_spamfilter_dnsbl:
+ redirect: fortinet.fortios.fortios_spamfilter_dnsbl
+ fortios_spamfilter_fortishield:
+ redirect: fortinet.fortios.fortios_spamfilter_fortishield
+ fortios_spamfilter_iptrust:
+ redirect: fortinet.fortios.fortios_spamfilter_iptrust
+ fortios_spamfilter_mheader:
+ redirect: fortinet.fortios.fortios_spamfilter_mheader
+ fortios_spamfilter_options:
+ redirect: fortinet.fortios.fortios_spamfilter_options
+ fortios_spamfilter_profile:
+ redirect: fortinet.fortios.fortios_spamfilter_profile
+ fortios_ssh_filter_profile:
+ redirect: fortinet.fortios.fortios_ssh_filter_profile
+ fortios_switch_controller_802_1X_settings:
+ redirect: fortinet.fortios.fortios_switch_controller_802_1X_settings
+ fortios_switch_controller_custom_command:
+ redirect: fortinet.fortios.fortios_switch_controller_custom_command
+ fortios_switch_controller_global:
+ redirect: fortinet.fortios.fortios_switch_controller_global
+ fortios_switch_controller_igmp_snooping:
+ redirect: fortinet.fortios.fortios_switch_controller_igmp_snooping
+ fortios_switch_controller_lldp_profile:
+ redirect: fortinet.fortios.fortios_switch_controller_lldp_profile
+ fortios_switch_controller_lldp_settings:
+ redirect: fortinet.fortios.fortios_switch_controller_lldp_settings
+ fortios_switch_controller_mac_sync_settings:
+ redirect: fortinet.fortios.fortios_switch_controller_mac_sync_settings
+ fortios_switch_controller_managed_switch:
+ redirect: fortinet.fortios.fortios_switch_controller_managed_switch
+ fortios_switch_controller_network_monitor_settings:
+ redirect: fortinet.fortios.fortios_switch_controller_network_monitor_settings
+ fortios_switch_controller_qos_dot1p_map:
+ redirect: fortinet.fortios.fortios_switch_controller_qos_dot1p_map
+ fortios_switch_controller_qos_ip_dscp_map:
+ redirect: fortinet.fortios.fortios_switch_controller_qos_ip_dscp_map
+ fortios_switch_controller_qos_qos_policy:
+ redirect: fortinet.fortios.fortios_switch_controller_qos_qos_policy
+ fortios_switch_controller_qos_queue_policy:
+ redirect: fortinet.fortios.fortios_switch_controller_qos_queue_policy
+ fortios_switch_controller_quarantine:
+ redirect: fortinet.fortios.fortios_switch_controller_quarantine
+ fortios_switch_controller_security_policy_802_1X:
+ redirect: fortinet.fortios.fortios_switch_controller_security_policy_802_1X
+ fortios_switch_controller_security_policy_captive_portal:
+ redirect: fortinet.fortios.fortios_switch_controller_security_policy_captive_portal
+ fortios_switch_controller_sflow:
+ redirect: fortinet.fortios.fortios_switch_controller_sflow
+ fortios_switch_controller_storm_control:
+ redirect: fortinet.fortios.fortios_switch_controller_storm_control
+ fortios_switch_controller_stp_settings:
+ redirect: fortinet.fortios.fortios_switch_controller_stp_settings
+ fortios_switch_controller_switch_group:
+ redirect: fortinet.fortios.fortios_switch_controller_switch_group
+ fortios_switch_controller_switch_interface_tag:
+ redirect: fortinet.fortios.fortios_switch_controller_switch_interface_tag
+ fortios_switch_controller_switch_log:
+ redirect: fortinet.fortios.fortios_switch_controller_switch_log
+ fortios_switch_controller_switch_profile:
+ redirect: fortinet.fortios.fortios_switch_controller_switch_profile
+ fortios_switch_controller_system:
+ redirect: fortinet.fortios.fortios_switch_controller_system
+ fortios_switch_controller_virtual_port_pool:
+ redirect: fortinet.fortios.fortios_switch_controller_virtual_port_pool
+ fortios_switch_controller_vlan:
+ redirect: fortinet.fortios.fortios_switch_controller_vlan
+ fortios_system_accprofile:
+ redirect: fortinet.fortios.fortios_system_accprofile
+ fortios_system_admin:
+ redirect: fortinet.fortios.fortios_system_admin
+ fortios_system_affinity_interrupt:
+ redirect: fortinet.fortios.fortios_system_affinity_interrupt
+ fortios_system_affinity_packet_redistribution:
+ redirect: fortinet.fortios.fortios_system_affinity_packet_redistribution
+ fortios_system_alarm:
+ redirect: fortinet.fortios.fortios_system_alarm
+ fortios_system_alias:
+ redirect: fortinet.fortios.fortios_system_alias
+ fortios_system_api_user:
+ redirect: fortinet.fortios.fortios_system_api_user
+ fortios_system_arp_table:
+ redirect: fortinet.fortios.fortios_system_arp_table
+ fortios_system_auto_install:
+ redirect: fortinet.fortios.fortios_system_auto_install
+ fortios_system_auto_script:
+ redirect: fortinet.fortios.fortios_system_auto_script
+ fortios_system_automation_action:
+ redirect: fortinet.fortios.fortios_system_automation_action
+ fortios_system_automation_destination:
+ redirect: fortinet.fortios.fortios_system_automation_destination
+ fortios_system_automation_stitch:
+ redirect: fortinet.fortios.fortios_system_automation_stitch
+ fortios_system_automation_trigger:
+ redirect: fortinet.fortios.fortios_system_automation_trigger
+ fortios_system_autoupdate_push_update:
+ redirect: fortinet.fortios.fortios_system_autoupdate_push_update
+ fortios_system_autoupdate_schedule:
+ redirect: fortinet.fortios.fortios_system_autoupdate_schedule
+ fortios_system_autoupdate_tunneling:
+ redirect: fortinet.fortios.fortios_system_autoupdate_tunneling
+ fortios_system_central_management:
+ redirect: fortinet.fortios.fortios_system_central_management
+ fortios_system_cluster_sync:
+ redirect: fortinet.fortios.fortios_system_cluster_sync
+ fortios_system_console:
+ redirect: fortinet.fortios.fortios_system_console
+ fortios_system_csf:
+ redirect: fortinet.fortios.fortios_system_csf
+ fortios_system_custom_language:
+ redirect: fortinet.fortios.fortios_system_custom_language
+ fortios_system_ddns:
+ redirect: fortinet.fortios.fortios_system_ddns
+ fortios_system_dedicated_mgmt:
+ redirect: fortinet.fortios.fortios_system_dedicated_mgmt
+ fortios_system_dhcp6_server:
+ redirect: fortinet.fortios.fortios_system_dhcp6_server
+ fortios_system_dhcp_server:
+ redirect: fortinet.fortios.fortios_system_dhcp_server
+ fortios_system_dns:
+ redirect: fortinet.fortios.fortios_system_dns
+ fortios_system_dns_database:
+ redirect: fortinet.fortios.fortios_system_dns_database
+ fortios_system_dns_server:
+ redirect: fortinet.fortios.fortios_system_dns_server
+ fortios_system_dscp_based_priority:
+ redirect: fortinet.fortios.fortios_system_dscp_based_priority
+ fortios_system_email_server:
+ redirect: fortinet.fortios.fortios_system_email_server
+ fortios_system_external_resource:
+ redirect: fortinet.fortios.fortios_system_external_resource
+ fortios_system_fips_cc:
+ redirect: fortinet.fortios.fortios_system_fips_cc
+ fortios_system_firmware_upgrade:
+ redirect: fortinet.fortios.fortios_system_firmware_upgrade
+ fortios_system_fm:
+ redirect: fortinet.fortios.fortios_system_fm
+ fortios_system_fortiguard:
+ redirect: fortinet.fortios.fortios_system_fortiguard
+ fortios_system_fortimanager:
+ redirect: fortinet.fortios.fortios_system_fortimanager
+ fortios_system_fortisandbox:
+ redirect: fortinet.fortios.fortios_system_fortisandbox
+ fortios_system_fsso_polling:
+ redirect: fortinet.fortios.fortios_system_fsso_polling
+ fortios_system_ftm_push:
+ redirect: fortinet.fortios.fortios_system_ftm_push
+ fortios_system_geoip_override:
+ redirect: fortinet.fortios.fortios_system_geoip_override
+ fortios_system_global:
+ redirect: fortinet.fortios.fortios_system_global
+ fortios_system_gre_tunnel:
+ redirect: fortinet.fortios.fortios_system_gre_tunnel
+ fortios_system_ha:
+ redirect: fortinet.fortios.fortios_system_ha
+ fortios_system_ha_monitor:
+ redirect: fortinet.fortios.fortios_system_ha_monitor
+ fortios_system_interface:
+ redirect: fortinet.fortios.fortios_system_interface
+ fortios_system_ipip_tunnel:
+ redirect: fortinet.fortios.fortios_system_ipip_tunnel
+ fortios_system_ips_urlfilter_dns:
+ redirect: fortinet.fortios.fortios_system_ips_urlfilter_dns
+ fortios_system_ips_urlfilter_dns6:
+ redirect: fortinet.fortios.fortios_system_ips_urlfilter_dns6
+ fortios_system_ipv6_neighbor_cache:
+ redirect: fortinet.fortios.fortios_system_ipv6_neighbor_cache
+ fortios_system_ipv6_tunnel:
+ redirect: fortinet.fortios.fortios_system_ipv6_tunnel
+ fortios_system_link_monitor:
+ redirect: fortinet.fortios.fortios_system_link_monitor
+ fortios_system_mac_address_table:
+ redirect: fortinet.fortios.fortios_system_mac_address_table
+ fortios_system_management_tunnel:
+ redirect: fortinet.fortios.fortios_system_management_tunnel
+ fortios_system_mobile_tunnel:
+ redirect: fortinet.fortios.fortios_system_mobile_tunnel
+ fortios_system_nat64:
+ redirect: fortinet.fortios.fortios_system_nat64
+ fortios_system_nd_proxy:
+ redirect: fortinet.fortios.fortios_system_nd_proxy
+ fortios_system_netflow:
+ redirect: fortinet.fortios.fortios_system_netflow
+ fortios_system_network_visibility:
+ redirect: fortinet.fortios.fortios_system_network_visibility
+ fortios_system_ntp:
+ redirect: fortinet.fortios.fortios_system_ntp
+ fortios_system_object_tagging:
+ redirect: fortinet.fortios.fortios_system_object_tagging
+ fortios_system_password_policy:
+ redirect: fortinet.fortios.fortios_system_password_policy
+ fortios_system_password_policy_guest_admin:
+ redirect: fortinet.fortios.fortios_system_password_policy_guest_admin
+ fortios_system_pppoe_interface:
+ redirect: fortinet.fortios.fortios_system_pppoe_interface
+ fortios_system_probe_response:
+ redirect: fortinet.fortios.fortios_system_probe_response
+ fortios_system_proxy_arp:
+ redirect: fortinet.fortios.fortios_system_proxy_arp
+ fortios_system_replacemsg_admin:
+ redirect: fortinet.fortios.fortios_system_replacemsg_admin
+ fortios_system_replacemsg_alertmail:
+ redirect: fortinet.fortios.fortios_system_replacemsg_alertmail
+ fortios_system_replacemsg_auth:
+ redirect: fortinet.fortios.fortios_system_replacemsg_auth
+ fortios_system_replacemsg_device_detection_portal:
+ redirect: fortinet.fortios.fortios_system_replacemsg_device_detection_portal
+ fortios_system_replacemsg_ec:
+ redirect: fortinet.fortios.fortios_system_replacemsg_ec
+ fortios_system_replacemsg_fortiguard_wf:
+ redirect: fortinet.fortios.fortios_system_replacemsg_fortiguard_wf
+ fortios_system_replacemsg_ftp:
+ redirect: fortinet.fortios.fortios_system_replacemsg_ftp
+ fortios_system_replacemsg_group:
+ redirect: fortinet.fortios.fortios_system_replacemsg_group
+ fortios_system_replacemsg_http:
+ redirect: fortinet.fortios.fortios_system_replacemsg_http
+ fortios_system_replacemsg_icap:
+ redirect: fortinet.fortios.fortios_system_replacemsg_icap
+ fortios_system_replacemsg_image:
+ redirect: fortinet.fortios.fortios_system_replacemsg_image
+ fortios_system_replacemsg_mail:
+ redirect: fortinet.fortios.fortios_system_replacemsg_mail
+ fortios_system_replacemsg_nac_quar:
+ redirect: fortinet.fortios.fortios_system_replacemsg_nac_quar
+ fortios_system_replacemsg_nntp:
+ redirect: fortinet.fortios.fortios_system_replacemsg_nntp
+ fortios_system_replacemsg_spam:
+ redirect: fortinet.fortios.fortios_system_replacemsg_spam
+ fortios_system_replacemsg_sslvpn:
+ redirect: fortinet.fortios.fortios_system_replacemsg_sslvpn
+ fortios_system_replacemsg_traffic_quota:
+ redirect: fortinet.fortios.fortios_system_replacemsg_traffic_quota
+ fortios_system_replacemsg_utm:
+ redirect: fortinet.fortios.fortios_system_replacemsg_utm
+ fortios_system_replacemsg_webproxy:
+ redirect: fortinet.fortios.fortios_system_replacemsg_webproxy
+ fortios_system_resource_limits:
+ redirect: fortinet.fortios.fortios_system_resource_limits
+ fortios_system_sdn_connector:
+ redirect: fortinet.fortios.fortios_system_sdn_connector
+ fortios_system_session_helper:
+ redirect: fortinet.fortios.fortios_system_session_helper
+ fortios_system_session_ttl:
+ redirect: fortinet.fortios.fortios_system_session_ttl
+ fortios_system_settings:
+ redirect: fortinet.fortios.fortios_system_settings
+ fortios_system_sflow:
+ redirect: fortinet.fortios.fortios_system_sflow
+ fortios_system_sit_tunnel:
+ redirect: fortinet.fortios.fortios_system_sit_tunnel
+ fortios_system_sms_server:
+ redirect: fortinet.fortios.fortios_system_sms_server
+ fortios_system_snmp_community:
+ redirect: fortinet.fortios.fortios_system_snmp_community
+ fortios_system_snmp_sysinfo:
+ redirect: fortinet.fortios.fortios_system_snmp_sysinfo
+ fortios_system_snmp_user:
+ redirect: fortinet.fortios.fortios_system_snmp_user
+ fortios_system_storage:
+ redirect: fortinet.fortios.fortios_system_storage
+ fortios_system_switch_interface:
+ redirect: fortinet.fortios.fortios_system_switch_interface
+ fortios_system_tos_based_priority:
+ redirect: fortinet.fortios.fortios_system_tos_based_priority
+ fortios_system_vdom:
+ redirect: fortinet.fortios.fortios_system_vdom
+ fortios_system_vdom_dns:
+ redirect: fortinet.fortios.fortios_system_vdom_dns
+ fortios_system_vdom_exception:
+ redirect: fortinet.fortios.fortios_system_vdom_exception
+ fortios_system_vdom_link:
+ redirect: fortinet.fortios.fortios_system_vdom_link
+ fortios_system_vdom_netflow:
+ redirect: fortinet.fortios.fortios_system_vdom_netflow
+ fortios_system_vdom_property:
+ redirect: fortinet.fortios.fortios_system_vdom_property
+ fortios_system_vdom_radius_server:
+ redirect: fortinet.fortios.fortios_system_vdom_radius_server
+ fortios_system_vdom_sflow:
+ redirect: fortinet.fortios.fortios_system_vdom_sflow
+ fortios_system_virtual_wan_link:
+ redirect: fortinet.fortios.fortios_system_virtual_wan_link
+ fortios_system_virtual_wire_pair:
+ redirect: fortinet.fortios.fortios_system_virtual_wire_pair
+ fortios_system_vxlan:
+ redirect: fortinet.fortios.fortios_system_vxlan
+ fortios_system_wccp:
+ redirect: fortinet.fortios.fortios_system_wccp
+ fortios_system_zone:
+ redirect: fortinet.fortios.fortios_system_zone
+ fortios_user_adgrp:
+ redirect: fortinet.fortios.fortios_user_adgrp
+ fortios_user_device:
+ redirect: fortinet.fortios.fortios_user_device
+ fortios_user_device_access_list:
+ redirect: fortinet.fortios.fortios_user_device_access_list
+ fortios_user_device_category:
+ redirect: fortinet.fortios.fortios_user_device_category
+ fortios_user_device_group:
+ redirect: fortinet.fortios.fortios_user_device_group
+ fortios_user_domain_controller:
+ redirect: fortinet.fortios.fortios_user_domain_controller
+ fortios_user_fortitoken:
+ redirect: fortinet.fortios.fortios_user_fortitoken
+ fortios_user_fsso:
+ redirect: fortinet.fortios.fortios_user_fsso
+ fortios_user_fsso_polling:
+ redirect: fortinet.fortios.fortios_user_fsso_polling
+ fortios_user_group:
+ redirect: fortinet.fortios.fortios_user_group
+ fortios_user_krb_keytab:
+ redirect: fortinet.fortios.fortios_user_krb_keytab
+ fortios_user_ldap:
+ redirect: fortinet.fortios.fortios_user_ldap
+ fortios_user_local:
+ redirect: fortinet.fortios.fortios_user_local
+ fortios_user_password_policy:
+ redirect: fortinet.fortios.fortios_user_password_policy
+ fortios_user_peer:
+ redirect: fortinet.fortios.fortios_user_peer
+ fortios_user_peergrp:
+ redirect: fortinet.fortios.fortios_user_peergrp
+ fortios_user_pop3:
+ redirect: fortinet.fortios.fortios_user_pop3
+ fortios_user_quarantine:
+ redirect: fortinet.fortios.fortios_user_quarantine
+ fortios_user_radius:
+ redirect: fortinet.fortios.fortios_user_radius
+ fortios_user_security_exempt_list:
+ redirect: fortinet.fortios.fortios_user_security_exempt_list
+ fortios_user_setting:
+ redirect: fortinet.fortios.fortios_user_setting
+ fortios_user_tacacsplus:
+ redirect: fortinet.fortios.fortios_user_tacacsplus
+ fortios_voip_profile:
+ redirect: fortinet.fortios.fortios_voip_profile
+ fortios_vpn_certificate_ca:
+ redirect: fortinet.fortios.fortios_vpn_certificate_ca
+ fortios_vpn_certificate_crl:
+ redirect: fortinet.fortios.fortios_vpn_certificate_crl
+ fortios_vpn_certificate_local:
+ redirect: fortinet.fortios.fortios_vpn_certificate_local
+ fortios_vpn_certificate_ocsp_server:
+ redirect: fortinet.fortios.fortios_vpn_certificate_ocsp_server
+ fortios_vpn_certificate_remote:
+ redirect: fortinet.fortios.fortios_vpn_certificate_remote
+ fortios_vpn_certificate_setting:
+ redirect: fortinet.fortios.fortios_vpn_certificate_setting
+ fortios_vpn_ipsec_concentrator:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_concentrator
+ fortios_vpn_ipsec_forticlient:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_forticlient
+ fortios_vpn_ipsec_manualkey:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_manualkey
+ fortios_vpn_ipsec_manualkey_interface:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_manualkey_interface
+ fortios_vpn_ipsec_phase1:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_phase1
+ fortios_vpn_ipsec_phase1_interface:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_phase1_interface
+ fortios_vpn_ipsec_phase2:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_phase2
+ fortios_vpn_ipsec_phase2_interface:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_phase2_interface
+ fortios_vpn_l2tp:
+ redirect: fortinet.fortios.fortios_vpn_l2tp
+ fortios_vpn_pptp:
+ redirect: fortinet.fortios.fortios_vpn_pptp
+ fortios_vpn_ssl_settings:
+ redirect: fortinet.fortios.fortios_vpn_ssl_settings
+ fortios_vpn_ssl_web_host_check_software:
+ redirect: fortinet.fortios.fortios_vpn_ssl_web_host_check_software
+ fortios_vpn_ssl_web_portal:
+ redirect: fortinet.fortios.fortios_vpn_ssl_web_portal
+ fortios_vpn_ssl_web_realm:
+ redirect: fortinet.fortios.fortios_vpn_ssl_web_realm
+ fortios_vpn_ssl_web_user_bookmark:
+ redirect: fortinet.fortios.fortios_vpn_ssl_web_user_bookmark
+ fortios_vpn_ssl_web_user_group_bookmark:
+ redirect: fortinet.fortios.fortios_vpn_ssl_web_user_group_bookmark
+ fortios_waf_main_class:
+ redirect: fortinet.fortios.fortios_waf_main_class
+ fortios_waf_profile:
+ redirect: fortinet.fortios.fortios_waf_profile
+ fortios_waf_signature:
+ redirect: fortinet.fortios.fortios_waf_signature
+ fortios_waf_sub_class:
+ redirect: fortinet.fortios.fortios_waf_sub_class
+ fortios_wanopt_auth_group:
+ redirect: fortinet.fortios.fortios_wanopt_auth_group
+ fortios_wanopt_cache_service:
+ redirect: fortinet.fortios.fortios_wanopt_cache_service
+ fortios_wanopt_content_delivery_network_rule:
+ redirect: fortinet.fortios.fortios_wanopt_content_delivery_network_rule
+ fortios_wanopt_peer:
+ redirect: fortinet.fortios.fortios_wanopt_peer
+ fortios_wanopt_profile:
+ redirect: fortinet.fortios.fortios_wanopt_profile
+ fortios_wanopt_remote_storage:
+ redirect: fortinet.fortios.fortios_wanopt_remote_storage
+ fortios_wanopt_settings:
+ redirect: fortinet.fortios.fortios_wanopt_settings
+ fortios_wanopt_webcache:
+ redirect: fortinet.fortios.fortios_wanopt_webcache
+ fortios_web_proxy_debug_url:
+ redirect: fortinet.fortios.fortios_web_proxy_debug_url
+ fortios_web_proxy_explicit:
+ redirect: fortinet.fortios.fortios_web_proxy_explicit
+ fortios_web_proxy_forward_server:
+ redirect: fortinet.fortios.fortios_web_proxy_forward_server
+ fortios_web_proxy_forward_server_group:
+ redirect: fortinet.fortios.fortios_web_proxy_forward_server_group
+ fortios_web_proxy_global:
+ redirect: fortinet.fortios.fortios_web_proxy_global
+ fortios_web_proxy_profile:
+ redirect: fortinet.fortios.fortios_web_proxy_profile
+ fortios_web_proxy_url_match:
+ redirect: fortinet.fortios.fortios_web_proxy_url_match
+ fortios_web_proxy_wisp:
+ redirect: fortinet.fortios.fortios_web_proxy_wisp
+ fortios_webfilter:
+ redirect: fortinet.fortios.fortios_webfilter
+ fortios_webfilter_content:
+ redirect: fortinet.fortios.fortios_webfilter_content
+ fortios_webfilter_content_header:
+ redirect: fortinet.fortios.fortios_webfilter_content_header
+ fortios_webfilter_fortiguard:
+ redirect: fortinet.fortios.fortios_webfilter_fortiguard
+ fortios_webfilter_ftgd_local_cat:
+ redirect: fortinet.fortios.fortios_webfilter_ftgd_local_cat
+ fortios_webfilter_ftgd_local_rating:
+ redirect: fortinet.fortios.fortios_webfilter_ftgd_local_rating
+ fortios_webfilter_ips_urlfilter_cache_setting:
+ redirect: fortinet.fortios.fortios_webfilter_ips_urlfilter_cache_setting
+ fortios_webfilter_ips_urlfilter_setting:
+ redirect: fortinet.fortios.fortios_webfilter_ips_urlfilter_setting
+ fortios_webfilter_ips_urlfilter_setting6:
+ redirect: fortinet.fortios.fortios_webfilter_ips_urlfilter_setting6
+ fortios_webfilter_override:
+ redirect: fortinet.fortios.fortios_webfilter_override
+ fortios_webfilter_profile:
+ redirect: fortinet.fortios.fortios_webfilter_profile
+ fortios_webfilter_search_engine:
+ redirect: fortinet.fortios.fortios_webfilter_search_engine
+ fortios_webfilter_urlfilter:
+ redirect: fortinet.fortios.fortios_webfilter_urlfilter
+ fortios_wireless_controller_ap_status:
+ redirect: fortinet.fortios.fortios_wireless_controller_ap_status
+ fortios_wireless_controller_ble_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_ble_profile
+ fortios_wireless_controller_bonjour_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_bonjour_profile
+ fortios_wireless_controller_global:
+ redirect: fortinet.fortios.fortios_wireless_controller_global
+ fortios_wireless_controller_hotspot20_anqp_3gpp_cellular:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_3gpp_cellular
+ fortios_wireless_controller_hotspot20_anqp_ip_address_type:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_ip_address_type
+ fortios_wireless_controller_hotspot20_anqp_nai_realm:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_nai_realm
+ fortios_wireless_controller_hotspot20_anqp_network_auth_type:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_network_auth_type
+ fortios_wireless_controller_hotspot20_anqp_roaming_consortium:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_roaming_consortium
+ fortios_wireless_controller_hotspot20_anqp_venue_name:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_venue_name
+ fortios_wireless_controller_hotspot20_h2qp_conn_capability:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_h2qp_conn_capability
+ fortios_wireless_controller_hotspot20_h2qp_operator_name:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_h2qp_operator_name
+ fortios_wireless_controller_hotspot20_h2qp_osu_provider:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_h2qp_osu_provider
+ fortios_wireless_controller_hotspot20_h2qp_wan_metric:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_h2qp_wan_metric
+ fortios_wireless_controller_hotspot20_hs_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_hs_profile
+ fortios_wireless_controller_hotspot20_icon:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_icon
+ fortios_wireless_controller_hotspot20_qos_map:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_qos_map
+ fortios_wireless_controller_inter_controller:
+ redirect: fortinet.fortios.fortios_wireless_controller_inter_controller
+ fortios_wireless_controller_qos_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_qos_profile
+ fortios_wireless_controller_setting:
+ redirect: fortinet.fortios.fortios_wireless_controller_setting
+ fortios_wireless_controller_timers:
+ redirect: fortinet.fortios.fortios_wireless_controller_timers
+ fortios_wireless_controller_utm_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_utm_profile
+ fortios_wireless_controller_vap:
+ redirect: fortinet.fortios.fortios_wireless_controller_vap
+ fortios_wireless_controller_vap_group:
+ redirect: fortinet.fortios.fortios_wireless_controller_vap_group
+ fortios_wireless_controller_wids_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_wids_profile
+ fortios_wireless_controller_wtp:
+ redirect: fortinet.fortios.fortios_wireless_controller_wtp
+ fortios_wireless_controller_wtp_group:
+ redirect: fortinet.fortios.fortios_wireless_controller_wtp_group
+ fortios_wireless_controller_wtp_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_wtp_profile
+ netbox_device:
+ redirect: netbox.netbox.netbox_device
+ netbox_ip_address:
+ redirect: netbox.netbox.netbox_ip_address
+ netbox_interface:
+ redirect: netbox.netbox.netbox_interface
+ netbox_prefix:
+ redirect: netbox.netbox.netbox_prefix
+ netbox_site:
+ redirect: netbox.netbox.netbox_site
+ aws_netapp_cvs_FileSystems:
+ redirect: netapp.aws.aws_netapp_cvs_filesystems
+ aws_netapp_cvs_active_directory:
+ redirect: netapp.aws.aws_netapp_cvs_active_directory
+ aws_netapp_cvs_pool:
+ redirect: netapp.aws.aws_netapp_cvs_pool
+ aws_netapp_cvs_snapshots:
+ redirect: netapp.aws.aws_netapp_cvs_snapshots
+ na_elementsw_access_group:
+ redirect: netapp.elementsw.na_elementsw_access_group
+ na_elementsw_account:
+ redirect: netapp.elementsw.na_elementsw_account
+ na_elementsw_admin_users:
+ redirect: netapp.elementsw.na_elementsw_admin_users
+ na_elementsw_backup:
+ redirect: netapp.elementsw.na_elementsw_backup
+ na_elementsw_check_connections:
+ redirect: netapp.elementsw.na_elementsw_check_connections
+ na_elementsw_cluster:
+ redirect: netapp.elementsw.na_elementsw_cluster
+ na_elementsw_cluster_config:
+ redirect: netapp.elementsw.na_elementsw_cluster_config
+ na_elementsw_cluster_pair:
+ redirect: netapp.elementsw.na_elementsw_cluster_pair
+ na_elementsw_cluster_snmp:
+ redirect: netapp.elementsw.na_elementsw_cluster_snmp
+ na_elementsw_drive:
+ redirect: netapp.elementsw.na_elementsw_drive
+ na_elementsw_initiators:
+ redirect: netapp.elementsw.na_elementsw_initiators
+ na_elementsw_ldap:
+ redirect: netapp.elementsw.na_elementsw_ldap
+ na_elementsw_network_interfaces:
+ redirect: netapp.elementsw.na_elementsw_network_interfaces
+ na_elementsw_node:
+ redirect: netapp.elementsw.na_elementsw_node
+ na_elementsw_snapshot:
+ redirect: netapp.elementsw.na_elementsw_snapshot
+ na_elementsw_snapshot_restore:
+ redirect: netapp.elementsw.na_elementsw_snapshot_restore
+ na_elementsw_snapshot_schedule:
+ redirect: netapp.elementsw.na_elementsw_snapshot_schedule
+ na_elementsw_vlan:
+ redirect: netapp.elementsw.na_elementsw_vlan
+ na_elementsw_volume:
+ redirect: netapp.elementsw.na_elementsw_volume
+ na_elementsw_volume_clone:
+ redirect: netapp.elementsw.na_elementsw_volume_clone
+ na_elementsw_volume_pair:
+ redirect: netapp.elementsw.na_elementsw_volume_pair
+ na_ontap_aggregate:
+ redirect: netapp.ontap.na_ontap_aggregate
+ na_ontap_autosupport:
+ redirect: netapp.ontap.na_ontap_autosupport
+ na_ontap_broadcast_domain:
+ redirect: netapp.ontap.na_ontap_broadcast_domain
+ na_ontap_broadcast_domain_ports:
+ redirect: netapp.ontap.na_ontap_broadcast_domain_ports
+ na_ontap_cg_snapshot:
+ redirect: netapp.ontap.na_ontap_cg_snapshot
+ na_ontap_cifs:
+ redirect: netapp.ontap.na_ontap_cifs
+ na_ontap_cifs_acl:
+ redirect: netapp.ontap.na_ontap_cifs_acl
+ na_ontap_cifs_server:
+ redirect: netapp.ontap.na_ontap_cifs_server
+ na_ontap_cluster:
+ redirect: netapp.ontap.na_ontap_cluster
+ na_ontap_cluster_ha:
+ redirect: netapp.ontap.na_ontap_cluster_ha
+ na_ontap_cluster_peer:
+ redirect: netapp.ontap.na_ontap_cluster_peer
+ na_ontap_command:
+ redirect: netapp.ontap.na_ontap_command
+ na_ontap_disks:
+ redirect: netapp.ontap.na_ontap_disks
+ na_ontap_dns:
+ redirect: netapp.ontap.na_ontap_dns
+ na_ontap_export_policy:
+ redirect: netapp.ontap.na_ontap_export_policy
+ na_ontap_export_policy_rule:
+ redirect: netapp.ontap.na_ontap_export_policy_rule
+ na_ontap_fcp:
+ redirect: netapp.ontap.na_ontap_fcp
+ na_ontap_firewall_policy:
+ redirect: netapp.ontap.na_ontap_firewall_policy
+ na_ontap_firmware_upgrade:
+ redirect: netapp.ontap.na_ontap_firmware_upgrade
+ na_ontap_flexcache:
+ redirect: netapp.ontap.na_ontap_flexcache
+ na_ontap_igroup:
+ redirect: netapp.ontap.na_ontap_igroup
+ na_ontap_igroup_initiator:
+ redirect: netapp.ontap.na_ontap_igroup_initiator
+ na_ontap_info:
+ redirect: netapp.ontap.na_ontap_info
+ na_ontap_interface:
+ redirect: netapp.ontap.na_ontap_interface
+ na_ontap_ipspace:
+ redirect: netapp.ontap.na_ontap_ipspace
+ na_ontap_iscsi:
+ redirect: netapp.ontap.na_ontap_iscsi
+ na_ontap_job_schedule:
+ redirect: netapp.ontap.na_ontap_job_schedule
+ na_ontap_kerberos_realm:
+ redirect: netapp.ontap.na_ontap_kerberos_realm
+ na_ontap_ldap:
+ redirect: netapp.ontap.na_ontap_ldap
+ na_ontap_ldap_client:
+ redirect: netapp.ontap.na_ontap_ldap_client
+ na_ontap_license:
+ redirect: netapp.ontap.na_ontap_license
+ na_ontap_lun:
+ redirect: netapp.ontap.na_ontap_lun
+ na_ontap_lun_copy:
+ redirect: netapp.ontap.na_ontap_lun_copy
+ na_ontap_lun_map:
+ redirect: netapp.ontap.na_ontap_lun_map
+ na_ontap_motd:
+ redirect: netapp.ontap.na_ontap_motd
+ na_ontap_ndmp:
+ redirect: netapp.ontap.na_ontap_ndmp
+ na_ontap_net_ifgrp:
+ redirect: netapp.ontap.na_ontap_net_ifgrp
+ na_ontap_net_port:
+ redirect: netapp.ontap.na_ontap_net_port
+ na_ontap_net_routes:
+ redirect: netapp.ontap.na_ontap_net_routes
+ na_ontap_net_subnet:
+ redirect: netapp.ontap.na_ontap_net_subnet
+ na_ontap_net_vlan:
+ redirect: netapp.ontap.na_ontap_net_vlan
+ na_ontap_nfs:
+ redirect: netapp.ontap.na_ontap_nfs
+ na_ontap_node:
+ redirect: netapp.ontap.na_ontap_node
+ na_ontap_ntp:
+ redirect: netapp.ontap.na_ontap_ntp
+ na_ontap_nvme:
+ redirect: netapp.ontap.na_ontap_nvme
+ na_ontap_nvme_namespace:
+ redirect: netapp.ontap.na_ontap_nvme_namespace
+ na_ontap_nvme_subsystem:
+ redirect: netapp.ontap.na_ontap_nvme_subsystem
+ na_ontap_object_store:
+ redirect: netapp.ontap.na_ontap_object_store
+ na_ontap_ports:
+ redirect: netapp.ontap.na_ontap_ports
+ na_ontap_portset:
+ redirect: netapp.ontap.na_ontap_portset
+ na_ontap_qos_adaptive_policy_group:
+ redirect: netapp.ontap.na_ontap_qos_adaptive_policy_group
+ na_ontap_qos_policy_group:
+ redirect: netapp.ontap.na_ontap_qos_policy_group
+ na_ontap_qtree:
+ redirect: netapp.ontap.na_ontap_qtree
+ na_ontap_quotas:
+ redirect: netapp.ontap.na_ontap_quotas
+ na_ontap_security_key_manager:
+ redirect: netapp.ontap.na_ontap_security_key_manager
+ na_ontap_service_processor_network:
+ redirect: netapp.ontap.na_ontap_service_processor_network
+ na_ontap_snapmirror:
+ redirect: netapp.ontap.na_ontap_snapmirror
+ na_ontap_snapshot:
+ redirect: netapp.ontap.na_ontap_snapshot
+ na_ontap_snapshot_policy:
+ redirect: netapp.ontap.na_ontap_snapshot_policy
+ na_ontap_snmp:
+ redirect: netapp.ontap.na_ontap_snmp
+ na_ontap_software_update:
+ redirect: netapp.ontap.na_ontap_software_update
+ na_ontap_svm:
+ redirect: netapp.ontap.na_ontap_svm
+ na_ontap_svm_options:
+ redirect: netapp.ontap.na_ontap_svm_options
+ na_ontap_ucadapter:
+ redirect: netapp.ontap.na_ontap_ucadapter
+ na_ontap_unix_group:
+ redirect: netapp.ontap.na_ontap_unix_group
+ na_ontap_unix_user:
+ redirect: netapp.ontap.na_ontap_unix_user
+ na_ontap_user:
+ redirect: netapp.ontap.na_ontap_user
+ na_ontap_user_role:
+ redirect: netapp.ontap.na_ontap_user_role
+ na_ontap_volume:
+ redirect: netapp.ontap.na_ontap_volume
+ na_ontap_volume_autosize:
+ redirect: netapp.ontap.na_ontap_volume_autosize
+ na_ontap_volume_clone:
+ redirect: netapp.ontap.na_ontap_volume_clone
+ na_ontap_vscan:
+ redirect: netapp.ontap.na_ontap_vscan
+ na_ontap_vscan_on_access_policy:
+ redirect: netapp.ontap.na_ontap_vscan_on_access_policy
+ na_ontap_vscan_on_demand_task:
+ redirect: netapp.ontap.na_ontap_vscan_on_demand_task
+ na_ontap_vscan_scanner_pool:
+ redirect: netapp.ontap.na_ontap_vscan_scanner_pool
+ na_ontap_vserver_cifs_security:
+ redirect: netapp.ontap.na_ontap_vserver_cifs_security
+ na_ontap_vserver_peer:
+ redirect: netapp.ontap.na_ontap_vserver_peer
+ cp_mgmt_access_layer:
+ redirect: check_point.mgmt.cp_mgmt_access_layer
+ cp_mgmt_access_layer_facts:
+ redirect: check_point.mgmt.cp_mgmt_access_layer_facts
+ cp_mgmt_access_role:
+ redirect: check_point.mgmt.cp_mgmt_access_role
+ cp_mgmt_access_role_facts:
+ redirect: check_point.mgmt.cp_mgmt_access_role_facts
+ cp_mgmt_access_rule:
+ redirect: check_point.mgmt.cp_mgmt_access_rule
+ cp_mgmt_access_rule_facts:
+ redirect: check_point.mgmt.cp_mgmt_access_rule_facts
+ cp_mgmt_address_range:
+ redirect: check_point.mgmt.cp_mgmt_address_range
+ cp_mgmt_address_range_facts:
+ redirect: check_point.mgmt.cp_mgmt_address_range_facts
+ cp_mgmt_administrator:
+ redirect: check_point.mgmt.cp_mgmt_administrator
+ cp_mgmt_administrator_facts:
+ redirect: check_point.mgmt.cp_mgmt_administrator_facts
+ cp_mgmt_application_site:
+ redirect: check_point.mgmt.cp_mgmt_application_site
+ cp_mgmt_application_site_category:
+ redirect: check_point.mgmt.cp_mgmt_application_site_category
+ cp_mgmt_application_site_category_facts:
+ redirect: check_point.mgmt.cp_mgmt_application_site_category_facts
+ cp_mgmt_application_site_facts:
+ redirect: check_point.mgmt.cp_mgmt_application_site_facts
+ cp_mgmt_application_site_group:
+ redirect: check_point.mgmt.cp_mgmt_application_site_group
+ cp_mgmt_application_site_group_facts:
+ redirect: check_point.mgmt.cp_mgmt_application_site_group_facts
+ cp_mgmt_assign_global_assignment:
+ redirect: check_point.mgmt.cp_mgmt_assign_global_assignment
+ cp_mgmt_discard:
+ redirect: check_point.mgmt.cp_mgmt_discard
+ cp_mgmt_dns_domain:
+ redirect: check_point.mgmt.cp_mgmt_dns_domain
+ cp_mgmt_dns_domain_facts:
+ redirect: check_point.mgmt.cp_mgmt_dns_domain_facts
+ cp_mgmt_dynamic_object:
+ redirect: check_point.mgmt.cp_mgmt_dynamic_object
+ cp_mgmt_dynamic_object_facts:
+ redirect: check_point.mgmt.cp_mgmt_dynamic_object_facts
+ cp_mgmt_exception_group:
+ redirect: check_point.mgmt.cp_mgmt_exception_group
+ cp_mgmt_exception_group_facts:
+ redirect: check_point.mgmt.cp_mgmt_exception_group_facts
+ cp_mgmt_global_assignment:
+ redirect: check_point.mgmt.cp_mgmt_global_assignment
+ cp_mgmt_global_assignment_facts:
+ redirect: check_point.mgmt.cp_mgmt_global_assignment_facts
+ cp_mgmt_group:
+ redirect: check_point.mgmt.cp_mgmt_group
+ cp_mgmt_group_facts:
+ redirect: check_point.mgmt.cp_mgmt_group_facts
+ cp_mgmt_group_with_exclusion:
+ redirect: check_point.mgmt.cp_mgmt_group_with_exclusion
+ cp_mgmt_group_with_exclusion_facts:
+ redirect: check_point.mgmt.cp_mgmt_group_with_exclusion_facts
+ cp_mgmt_host:
+ redirect: check_point.mgmt.cp_mgmt_host
+ cp_mgmt_host_facts:
+ redirect: check_point.mgmt.cp_mgmt_host_facts
+ cp_mgmt_install_policy:
+ redirect: check_point.mgmt.cp_mgmt_install_policy
+ cp_mgmt_mds_facts:
+ redirect: check_point.mgmt.cp_mgmt_mds_facts
+ cp_mgmt_multicast_address_range:
+ redirect: check_point.mgmt.cp_mgmt_multicast_address_range
+ cp_mgmt_multicast_address_range_facts:
+ redirect: check_point.mgmt.cp_mgmt_multicast_address_range_facts
+ cp_mgmt_network:
+ redirect: check_point.mgmt.cp_mgmt_network
+ cp_mgmt_network_facts:
+ redirect: check_point.mgmt.cp_mgmt_network_facts
+ cp_mgmt_package:
+ redirect: check_point.mgmt.cp_mgmt_package
+ cp_mgmt_package_facts:
+ redirect: check_point.mgmt.cp_mgmt_package_facts
+ cp_mgmt_publish:
+ redirect: check_point.mgmt.cp_mgmt_publish
+ cp_mgmt_put_file:
+ redirect: check_point.mgmt.cp_mgmt_put_file
+ cp_mgmt_run_ips_update:
+ redirect: check_point.mgmt.cp_mgmt_run_ips_update
+ cp_mgmt_run_script:
+ redirect: check_point.mgmt.cp_mgmt_run_script
+ cp_mgmt_security_zone:
+ redirect: check_point.mgmt.cp_mgmt_security_zone
+ cp_mgmt_security_zone_facts:
+ redirect: check_point.mgmt.cp_mgmt_security_zone_facts
+ cp_mgmt_service_dce_rpc:
+ redirect: check_point.mgmt.cp_mgmt_service_dce_rpc
+ cp_mgmt_service_dce_rpc_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_dce_rpc_facts
+ cp_mgmt_service_group:
+ redirect: check_point.mgmt.cp_mgmt_service_group
+ cp_mgmt_service_group_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_group_facts
+ cp_mgmt_service_icmp:
+ redirect: check_point.mgmt.cp_mgmt_service_icmp
+ cp_mgmt_service_icmp6:
+ redirect: check_point.mgmt.cp_mgmt_service_icmp6
+ cp_mgmt_service_icmp6_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_icmp6_facts
+ cp_mgmt_service_icmp_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_icmp_facts
+ cp_mgmt_service_other:
+ redirect: check_point.mgmt.cp_mgmt_service_other
+ cp_mgmt_service_other_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_other_facts
+ cp_mgmt_service_rpc:
+ redirect: check_point.mgmt.cp_mgmt_service_rpc
+ cp_mgmt_service_rpc_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_rpc_facts
+ cp_mgmt_service_sctp:
+ redirect: check_point.mgmt.cp_mgmt_service_sctp
+ cp_mgmt_service_sctp_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_sctp_facts
+ cp_mgmt_service_tcp:
+ redirect: check_point.mgmt.cp_mgmt_service_tcp
+ cp_mgmt_service_tcp_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_tcp_facts
+ cp_mgmt_service_udp:
+ redirect: check_point.mgmt.cp_mgmt_service_udp
+ cp_mgmt_service_udp_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_udp_facts
+ cp_mgmt_session_facts:
+ redirect: check_point.mgmt.cp_mgmt_session_facts
+ cp_mgmt_simple_gateway:
+ redirect: check_point.mgmt.cp_mgmt_simple_gateway
+ cp_mgmt_simple_gateway_facts:
+ redirect: check_point.mgmt.cp_mgmt_simple_gateway_facts
+ cp_mgmt_tag:
+ redirect: check_point.mgmt.cp_mgmt_tag
+ cp_mgmt_tag_facts:
+ redirect: check_point.mgmt.cp_mgmt_tag_facts
+ cp_mgmt_threat_exception:
+ redirect: check_point.mgmt.cp_mgmt_threat_exception
+ cp_mgmt_threat_exception_facts:
+ redirect: check_point.mgmt.cp_mgmt_threat_exception_facts
+ cp_mgmt_threat_indicator:
+ redirect: check_point.mgmt.cp_mgmt_threat_indicator
+ cp_mgmt_threat_indicator_facts:
+ redirect: check_point.mgmt.cp_mgmt_threat_indicator_facts
+ cp_mgmt_threat_layer:
+ redirect: check_point.mgmt.cp_mgmt_threat_layer
+ cp_mgmt_threat_layer_facts:
+ redirect: check_point.mgmt.cp_mgmt_threat_layer_facts
+ cp_mgmt_threat_profile:
+ redirect: check_point.mgmt.cp_mgmt_threat_profile
+ cp_mgmt_threat_profile_facts:
+ redirect: check_point.mgmt.cp_mgmt_threat_profile_facts
+ cp_mgmt_threat_protection_override:
+ redirect: check_point.mgmt.cp_mgmt_threat_protection_override
+ cp_mgmt_threat_rule:
+ redirect: check_point.mgmt.cp_mgmt_threat_rule
+ cp_mgmt_threat_rule_facts:
+ redirect: check_point.mgmt.cp_mgmt_threat_rule_facts
+ cp_mgmt_time:
+ redirect: check_point.mgmt.cp_mgmt_time
+ cp_mgmt_time_facts:
+ redirect: check_point.mgmt.cp_mgmt_time_facts
+ cp_mgmt_verify_policy:
+ redirect: check_point.mgmt.cp_mgmt_verify_policy
+ cp_mgmt_vpn_community_meshed:
+ redirect: check_point.mgmt.cp_mgmt_vpn_community_meshed
+ cp_mgmt_vpn_community_meshed_facts:
+ redirect: check_point.mgmt.cp_mgmt_vpn_community_meshed_facts
+ cp_mgmt_vpn_community_star:
+ redirect: check_point.mgmt.cp_mgmt_vpn_community_star
+ cp_mgmt_vpn_community_star_facts:
+ redirect: check_point.mgmt.cp_mgmt_vpn_community_star_facts
+ cp_mgmt_wildcard:
+ redirect: check_point.mgmt.cp_mgmt_wildcard
+ cp_mgmt_wildcard_facts:
+ redirect: check_point.mgmt.cp_mgmt_wildcard_facts
+ eos_ospfv2:
+ redirect: arista.eos.eos_ospfv2
+ eos_static_route:
+ redirect: arista.eos.eos_static_route
+ eos_acls:
+ redirect: arista.eos.eos_acls
+ eos_interfaces:
+ redirect: arista.eos.eos_interfaces
+ eos_facts:
+ redirect: arista.eos.eos_facts
+ eos_logging:
+ redirect: arista.eos.eos_logging
+ eos_lag_interfaces:
+ redirect: arista.eos.eos_lag_interfaces
+ eos_l2_interfaces:
+ redirect: arista.eos.eos_l2_interfaces
+ eos_l3_interface:
+ redirect: arista.eos.eos_l3_interface
+ eos_lacp:
+ redirect: arista.eos.eos_lacp
+ eos_lldp_global:
+ redirect: arista.eos.eos_lldp_global
+ eos_static_routes:
+ redirect: arista.eos.eos_static_routes
+ eos_lacp_interfaces:
+ redirect: arista.eos.eos_lacp_interfaces
+ eos_system:
+ redirect: arista.eos.eos_system
+ eos_vlan:
+ redirect: arista.eos.eos_vlan
+ eos_eapi:
+ redirect: arista.eos.eos_eapi
+ eos_acl_interfaces:
+ redirect: arista.eos.eos_acl_interfaces
+ eos_l2_interface:
+ redirect: arista.eos.eos_l2_interface
+ eos_lldp_interfaces:
+ redirect: arista.eos.eos_lldp_interfaces
+ eos_command:
+ redirect: arista.eos.eos_command
+ eos_linkagg:
+ redirect: arista.eos.eos_linkagg
+ eos_l3_interfaces:
+ redirect: arista.eos.eos_l3_interfaces
+ eos_vlans:
+ redirect: arista.eos.eos_vlans
+ eos_user:
+ redirect: arista.eos.eos_user
+ eos_banner:
+ redirect: arista.eos.eos_banner
+ eos_lldp:
+ redirect: arista.eos.eos_lldp
+ eos_interface:
+ redirect: arista.eos.eos_interface
+ eos_config:
+ redirect: arista.eos.eos_config
+ eos_bgp:
+ redirect: arista.eos.eos_bgp
+ eos_vrf:
+ redirect: arista.eos.eos_vrf
+ aci_aaa_user:
+ redirect: cisco.aci.aci_aaa_user
+ aci_aaa_user_certificate:
+ redirect: cisco.aci.aci_aaa_user_certificate
+ aci_access_port_block_to_access_port:
+ redirect: cisco.aci.aci_access_port_block_to_access_port
+ aci_access_port_to_interface_policy_leaf_profile:
+ redirect: cisco.aci.aci_access_port_to_interface_policy_leaf_profile
+ aci_access_sub_port_block_to_access_port:
+ redirect: cisco.aci.aci_access_sub_port_block_to_access_port
+ aci_aep:
+ redirect: cisco.aci.aci_aep
+ aci_aep_to_domain:
+ redirect: cisco.aci.aci_aep_to_domain
+ aci_ap:
+ redirect: cisco.aci.aci_ap
+ aci_bd:
+ redirect: cisco.aci.aci_bd
+ aci_bd_subnet:
+ redirect: cisco.aci.aci_bd_subnet
+ aci_bd_to_l3out:
+ redirect: cisco.aci.aci_bd_to_l3out
+ aci_config_rollback:
+ redirect: cisco.aci.aci_config_rollback
+ aci_config_snapshot:
+ redirect: cisco.aci.aci_config_snapshot
+ aci_contract:
+ redirect: cisco.aci.aci_contract
+ aci_contract_subject:
+ redirect: cisco.aci.aci_contract_subject
+ aci_contract_subject_to_filter:
+ redirect: cisco.aci.aci_contract_subject_to_filter
+ aci_domain:
+ redirect: cisco.aci.aci_domain
+ aci_domain_to_encap_pool:
+ redirect: cisco.aci.aci_domain_to_encap_pool
+ aci_domain_to_vlan_pool:
+ redirect: cisco.aci.aci_domain_to_vlan_pool
+ aci_encap_pool:
+ redirect: cisco.aci.aci_encap_pool
+ aci_encap_pool_range:
+ redirect: cisco.aci.aci_encap_pool_range
+ aci_epg:
+ redirect: cisco.aci.aci_epg
+ aci_epg_monitoring_policy:
+ redirect: cisco.aci.aci_epg_monitoring_policy
+ aci_epg_to_contract:
+ redirect: cisco.aci.aci_epg_to_contract
+ aci_epg_to_domain:
+ redirect: cisco.aci.aci_epg_to_domain
+ aci_fabric_node:
+ redirect: cisco.aci.aci_fabric_node
+ aci_fabric_scheduler:
+ redirect: cisco.aci.aci_fabric_scheduler
+ aci_filter:
+ redirect: cisco.aci.aci_filter
+ aci_filter_entry:
+ redirect: cisco.aci.aci_filter_entry
+ aci_firmware_group:
+ redirect: cisco.aci.aci_firmware_group
+ aci_firmware_group_node:
+ redirect: cisco.aci.aci_firmware_group_node
+ aci_firmware_policy:
+ redirect: cisco.aci.aci_firmware_policy
+ aci_firmware_source:
+ redirect: cisco.aci.aci_firmware_source
+ aci_interface_policy_cdp:
+ redirect: cisco.aci.aci_interface_policy_cdp
+ aci_interface_policy_fc:
+ redirect: cisco.aci.aci_interface_policy_fc
+ aci_interface_policy_l2:
+ redirect: cisco.aci.aci_interface_policy_l2
+ aci_interface_policy_leaf_policy_group:
+ redirect: cisco.aci.aci_interface_policy_leaf_policy_group
+ aci_interface_policy_leaf_profile:
+ redirect: cisco.aci.aci_interface_policy_leaf_profile
+ aci_interface_policy_lldp:
+ redirect: cisco.aci.aci_interface_policy_lldp
+ aci_interface_policy_mcp:
+ redirect: cisco.aci.aci_interface_policy_mcp
+ aci_interface_policy_ospf:
+ redirect: cisco.aci.aci_interface_policy_ospf
+ aci_interface_policy_port_channel:
+ redirect: cisco.aci.aci_interface_policy_port_channel
+ aci_interface_policy_port_security:
+ redirect: cisco.aci.aci_interface_policy_port_security
+ aci_interface_selector_to_switch_policy_leaf_profile:
+ redirect: cisco.aci.aci_interface_selector_to_switch_policy_leaf_profile
+ aci_l3out:
+ redirect: cisco.aci.aci_l3out
+ aci_l3out_extepg:
+ redirect: cisco.aci.aci_l3out_extepg
+ aci_l3out_extsubnet:
+ redirect: cisco.aci.aci_l3out_extsubnet
+ aci_l3out_route_tag_policy:
+ redirect: cisco.aci.aci_l3out_route_tag_policy
+ aci_maintenance_group:
+ redirect: cisco.aci.aci_maintenance_group
+ aci_maintenance_group_node:
+ redirect: cisco.aci.aci_maintenance_group_node
+ aci_maintenance_policy:
+ redirect: cisco.aci.aci_maintenance_policy
+ aci_rest:
+ redirect: cisco.aci.aci_rest
+ aci_static_binding_to_epg:
+ redirect: cisco.aci.aci_static_binding_to_epg
+ aci_switch_leaf_selector:
+ redirect: cisco.aci.aci_switch_leaf_selector
+ aci_switch_policy_leaf_profile:
+ redirect: cisco.aci.aci_switch_policy_leaf_profile
+ aci_switch_policy_vpc_protection_group:
+ redirect: cisco.aci.aci_switch_policy_vpc_protection_group
+ aci_taboo_contract:
+ redirect: cisco.aci.aci_taboo_contract
+ aci_tenant:
+ redirect: cisco.aci.aci_tenant
+ aci_tenant_action_rule_profile:
+ redirect: cisco.aci.aci_tenant_action_rule_profile
+ aci_tenant_ep_retention_policy:
+ redirect: cisco.aci.aci_tenant_ep_retention_policy
+ aci_tenant_span_dst_group:
+ redirect: cisco.aci.aci_tenant_span_dst_group
+ aci_tenant_span_src_group:
+ redirect: cisco.aci.aci_tenant_span_src_group
+ aci_tenant_span_src_group_to_dst_group:
+ redirect: cisco.aci.aci_tenant_span_src_group_to_dst_group
+ aci_vlan_pool:
+ redirect: cisco.aci.aci_vlan_pool
+ aci_vlan_pool_encap_block:
+ redirect: cisco.aci.aci_vlan_pool_encap_block
+ aci_vmm_credential:
+ redirect: cisco.aci.aci_vmm_credential
+ aci_vrf:
+ redirect: cisco.aci.aci_vrf
+ asa_acl:
+ redirect: cisco.asa.asa_acl
+ asa_config:
+ redirect: cisco.asa.asa_config
+ asa_og:
+ redirect: cisco.asa.asa_og
+ asa_command:
+ redirect: cisco.asa.asa_command
+ intersight_facts:
+ redirect: cisco.intersight.intersight_info
+ intersight_info:
+ redirect: cisco.intersight.intersight_info
+ intersight_rest_api:
+ redirect: cisco.intersight.intersight_rest_api
+ ios_ospfv2:
+ redirect: cisco.ios.ios_ospfv2
+ ios_l3_interfaces:
+ redirect: cisco.ios.ios_l3_interfaces
+ ios_lldp:
+ redirect: cisco.ios.ios_lldp
+ ios_interface:
+ redirect: cisco.ios.ios_interface
+ ios_lldp_interfaces:
+ redirect: cisco.ios.ios_lldp_interfaces
+ ios_l3_interface:
+ redirect: cisco.ios.ios_l3_interface
+ ios_acl_interfaces:
+ redirect: cisco.ios.ios_acl_interfaces
+ ios_static_routes:
+ redirect: cisco.ios.ios_static_routes
+ ios_l2_interfaces:
+ redirect: cisco.ios.ios_l2_interfaces
+ ios_logging:
+ redirect: cisco.ios.ios_logging
+ ios_vlan:
+ redirect: cisco.ios.ios_vlan
+ ios_command:
+ redirect: cisco.ios.ios_command
+ ios_static_route:
+ redirect: cisco.ios.ios_static_route
+ ios_lldp_global:
+ redirect: cisco.ios.ios_lldp_global
+ ios_banner:
+ redirect: cisco.ios.ios_banner
+ ios_lag_interfaces:
+ redirect: cisco.ios.ios_lag_interfaces
+ ios_linkagg:
+ redirect: cisco.ios.ios_linkagg
+ ios_user:
+ redirect: cisco.ios.ios_user
+ ios_system:
+ redirect: cisco.ios.ios_system
+ ios_facts:
+ redirect: cisco.ios.ios_facts
+ ios_ping:
+ redirect: cisco.ios.ios_ping
+ ios_vlans:
+ redirect: cisco.ios.ios_vlans
+ ios_vrf:
+ redirect: cisco.ios.ios_vrf
+ ios_bgp:
+ redirect: cisco.ios.ios_bgp
+ ios_ntp:
+ redirect: cisco.ios.ios_ntp
+ ios_lacp_interfaces:
+ redirect: cisco.ios.ios_lacp_interfaces
+ ios_lacp:
+ redirect: cisco.ios.ios_lacp
+ ios_config:
+ redirect: cisco.ios.ios_config
+ ios_l2_interface:
+ redirect: cisco.ios.ios_l2_interface
+ ios_acls:
+ redirect: cisco.ios.ios_acls
+ ios_interfaces:
+ redirect: cisco.ios.ios_interfaces
+ iosxr_ospfv2:
+ redirect: cisco.iosxr.iosxr_ospfv2
+ iosxr_bgp:
+ redirect: cisco.iosxr.iosxr_bgp
+ iosxr_lldp_interfaces:
+ redirect: cisco.iosxr.iosxr_lldp_interfaces
+ iosxr_l3_interfaces:
+ redirect: cisco.iosxr.iosxr_l3_interfaces
+ iosxr_netconf:
+ redirect: cisco.iosxr.iosxr_netconf
+ iosxr_static_routes:
+ redirect: cisco.iosxr.iosxr_static_routes
+ iosxr_lldp_global:
+ redirect: cisco.iosxr.iosxr_lldp_global
+ iosxr_config:
+ redirect: cisco.iosxr.iosxr_config
+ iosxr_lag_interfaces:
+ redirect: cisco.iosxr.iosxr_lag_interfaces
+ iosxr_interface:
+ redirect: cisco.iosxr.iosxr_interface
+ iosxr_user:
+ redirect: cisco.iosxr.iosxr_user
+ iosxr_facts:
+ redirect: cisco.iosxr.iosxr_facts
+ iosxr_interfaces:
+ redirect: cisco.iosxr.iosxr_interfaces
+ iosxr_acl_interfaces:
+ redirect: cisco.iosxr.iosxr_acl_interfaces
+ iosxr_l2_interfaces:
+ redirect: cisco.iosxr.iosxr_l2_interfaces
+ iosxr_logging:
+ redirect: cisco.iosxr.iosxr_logging
+ iosxr_lacp:
+ redirect: cisco.iosxr.iosxr_lacp
+ iosxr_acls:
+ redirect: cisco.iosxr.iosxr_acls
+ iosxr_system:
+ redirect: cisco.iosxr.iosxr_system
+ iosxr_command:
+ redirect: cisco.iosxr.iosxr_command
+ iosxr_lacp_interfaces:
+ redirect: cisco.iosxr.iosxr_lacp_interfaces
+ iosxr_banner:
+ redirect: cisco.iosxr.iosxr_banner
+ meraki_admin:
+ redirect: cisco.meraki.meraki_admin
+ meraki_config_template:
+ redirect: cisco.meraki.meraki_config_template
+ meraki_content_filtering:
+ redirect: cisco.meraki.meraki_content_filtering
+ meraki_device:
+ redirect: cisco.meraki.meraki_device
+ meraki_firewalled_services:
+ redirect: cisco.meraki.meraki_firewalled_services
+ meraki_malware:
+ redirect: cisco.meraki.meraki_malware
+ meraki_mr_l3_firewall:
+ redirect: cisco.meraki.meraki_mr_l3_firewall
+ meraki_mx_l3_firewall:
+ redirect: cisco.meraki.meraki_mx_l3_firewall
+ meraki_mx_l7_firewall:
+ redirect: cisco.meraki.meraki_mx_l7_firewall
+ meraki_nat:
+ redirect: cisco.meraki.meraki_nat
+ meraki_network:
+ redirect: cisco.meraki.meraki_network
+ meraki_organization:
+ redirect: cisco.meraki.meraki_organization
+ meraki_snmp:
+ redirect: cisco.meraki.meraki_snmp
+ meraki_ssid:
+ redirect: cisco.meraki.meraki_ssid
+ meraki_static_route:
+ redirect: cisco.meraki.meraki_static_route
+ meraki_switchport:
+ redirect: cisco.meraki.meraki_switchport
+ meraki_syslog:
+ redirect: cisco.meraki.meraki_syslog
+ meraki_vlan:
+ redirect: cisco.meraki.meraki_vlan
+ meraki_webhook:
+ redirect: cisco.meraki.meraki_webhook
+ mso_label:
+ redirect: cisco.mso.mso_label
+ mso_role:
+ redirect: cisco.mso.mso_role
+ mso_schema:
+ redirect: cisco.mso.mso_schema
+ mso_schema_site:
+ redirect: cisco.mso.mso_schema_site
+ mso_schema_site_anp:
+ redirect: cisco.mso.mso_schema_site_anp
+ mso_schema_site_anp_epg:
+ redirect: cisco.mso.mso_schema_site_anp_epg
+ mso_schema_site_anp_epg_domain:
+ redirect: cisco.mso.mso_schema_site_anp_epg_domain
+ mso_schema_site_anp_epg_staticleaf:
+ redirect: cisco.mso.mso_schema_site_anp_epg_staticleaf
+ mso_schema_site_anp_epg_staticport:
+ redirect: cisco.mso.mso_schema_site_anp_epg_staticport
+ mso_schema_site_anp_epg_subnet:
+ redirect: cisco.mso.mso_schema_site_anp_epg_subnet
+ mso_schema_site_bd:
+ redirect: cisco.mso.mso_schema_site_bd
+ mso_schema_site_bd_l3out:
+ redirect: cisco.mso.mso_schema_site_bd_l3out
+ mso_schema_site_bd_subnet:
+ redirect: cisco.mso.mso_schema_site_bd_subnet
+ mso_schema_site_vrf:
+ redirect: cisco.mso.mso_schema_site_vrf
+ mso_schema_site_vrf_region:
+ redirect: cisco.mso.mso_schema_site_vrf_region
+ mso_schema_site_vrf_region_cidr:
+ redirect: cisco.mso.mso_schema_site_vrf_region_cidr
+ mso_schema_site_vrf_region_cidr_subnet:
+ redirect: cisco.mso.mso_schema_site_vrf_region_cidr_subnet
+ mso_schema_template:
+ redirect: cisco.mso.mso_schema_template
+ mso_schema_template_anp:
+ redirect: cisco.mso.mso_schema_template_anp
+ mso_schema_template_anp_epg:
+ redirect: cisco.mso.mso_schema_template_anp_epg
+ mso_schema_template_anp_epg_contract:
+ redirect: cisco.mso.mso_schema_template_anp_epg_contract
+ mso_schema_template_anp_epg_subnet:
+ redirect: cisco.mso.mso_schema_template_anp_epg_subnet
+ mso_schema_template_bd:
+ redirect: cisco.mso.mso_schema_template_bd
+ mso_schema_template_bd_subnet:
+ redirect: cisco.mso.mso_schema_template_bd_subnet
+ mso_schema_template_contract_filter:
+ redirect: cisco.mso.mso_schema_template_contract_filter
+ mso_schema_template_deploy:
+ redirect: cisco.mso.mso_schema_template_deploy
+ mso_schema_template_externalepg:
+ redirect: cisco.mso.mso_schema_template_externalepg
+ mso_schema_template_filter_entry:
+ redirect: cisco.mso.mso_schema_template_filter_entry
+ mso_schema_template_l3out:
+ redirect: cisco.mso.mso_schema_template_l3out
+ mso_schema_template_vrf:
+ redirect: cisco.mso.mso_schema_template_vrf
+ mso_site:
+ redirect: cisco.mso.mso_site
+ mso_tenant:
+ redirect: cisco.mso.mso_tenant
+ mso_user:
+ redirect: cisco.mso.mso_user
+ nxos_telemetry:
+ redirect: cisco.nxos.nxos_telemetry
+ nxos_user:
+ redirect: cisco.nxos.nxos_user
+ nxos_bfd_interfaces:
+ redirect: cisco.nxos.nxos_bfd_interfaces
+ nxos_ospf:
+ redirect: cisco.nxos.nxos_ospf
+ nxos_ospfv2:
+ redirect: cisco.nxos.nxos_ospfv2
+ nxos_system:
+ redirect: cisco.nxos.nxos_system
+ nxos_l3_interface:
+ redirect: cisco.nxos.nxos_l3_interface
+ nxos_smu:
+ redirect: cisco.nxos.nxos_smu
+ nxos_reboot:
+ redirect: cisco.nxos.nxos_reboot
+ nxos_static_routes:
+ redirect: cisco.nxos.nxos_static_routes
+ nxos_static_route:
+ redirect: cisco.nxos.nxos_static_route
+ nxos_acl_interfaces:
+ redirect: cisco.nxos.nxos_acl_interfaces
+ nxos_vpc:
+ redirect: cisco.nxos.nxos_vpc
+ nxos_linkagg:
+ redirect: cisco.nxos.nxos_linkagg
+ nxos_vxlan_vtep_vni:
+ redirect: cisco.nxos.nxos_vxlan_vtep_vni
+ nxos_vrrp:
+ redirect: cisco.nxos.nxos_vrrp
+ nxos_lldp:
+ redirect: cisco.nxos.nxos_lldp
+ nxos_interface:
+ redirect: cisco.nxos.nxos_interface
+ nxos_lacp_interfaces:
+ redirect: cisco.nxos.nxos_lacp_interfaces
+ nxos_gir_profile_management:
+ redirect: cisco.nxos.nxos_gir_profile_management
+ nxos_snmp_community:
+ redirect: cisco.nxos.nxos_snmp_community
+ nxos_lag_interfaces:
+ redirect: cisco.nxos.nxos_lag_interfaces
+ nxos_acl:
+ redirect: cisco.nxos.nxos_acl
+ nxos_hsrp_interfaces:
+ redirect: cisco.nxos.nxos_hsrp_interfaces
+ nxos_lldp_global:
+ redirect: cisco.nxos.nxos_lldp_global
+ nxos_snmp_contact:
+ redirect: cisco.nxos.nxos_snmp_contact
+ nxos_vrf_interface:
+ redirect: cisco.nxos.nxos_vrf_interface
+ nxos_rpm:
+ redirect: cisco.nxos.nxos_rpm
+ nxos_ntp_options:
+ redirect: cisco.nxos.nxos_ntp_options
+ nxos_ospf_vrf:
+ redirect: cisco.nxos.nxos_ospf_vrf
+ nxos_vtp_version:
+ redirect: cisco.nxos.nxos_vtp_version
+ nxos_igmp_interface:
+ redirect: cisco.nxos.nxos_igmp_interface
+ nxos_bgp_neighbor:
+ redirect: cisco.nxos.nxos_bgp_neighbor
+ nxos_bgp:
+ redirect: cisco.nxos.nxos_bgp
+ nxos_rollback:
+ redirect: cisco.nxos.nxos_rollback
+ nxos_aaa_server:
+ redirect: cisco.nxos.nxos_aaa_server
+ nxos_udld_interface:
+ redirect: cisco.nxos.nxos_udld_interface
+ nxos_bgp_af:
+ redirect: cisco.nxos.nxos_bgp_af
+ nxos_feature:
+ redirect: cisco.nxos.nxos_feature
+ nxos_snmp_traps:
+ redirect: cisco.nxos.nxos_snmp_traps
+ nxos_evpn_global:
+ redirect: cisco.nxos.nxos_evpn_global
+ nxos_igmp:
+ redirect: cisco.nxos.nxos_igmp
+ nxos_aaa_server_host:
+ redirect: cisco.nxos.nxos_aaa_server_host
+ nxos_vrf_af:
+ redirect: cisco.nxos.nxos_vrf_af
+ nxos_snapshot:
+ redirect: cisco.nxos.nxos_snapshot
+ nxos_gir:
+ redirect: cisco.nxos.nxos_gir
+ nxos_command:
+ redirect: cisco.nxos.nxos_command
+ nxos_vxlan_vtep:
+ redirect: cisco.nxos.nxos_vxlan_vtep
+ nxos_snmp_location:
+ redirect: cisco.nxos.nxos_snmp_location
+ nxos_evpn_vni:
+ redirect: cisco.nxos.nxos_evpn_vni
+ nxos_vpc_interface:
+ redirect: cisco.nxos.nxos_vpc_interface
+ nxos_logging:
+ redirect: cisco.nxos.nxos_logging
+ nxos_pim:
+ redirect: cisco.nxos.nxos_pim
+ nxos_ping:
+ redirect: cisco.nxos.nxos_ping
+ nxos_pim_rp_address:
+ redirect: cisco.nxos.nxos_pim_rp_address
+ nxos_pim_interface:
+ redirect: cisco.nxos.nxos_pim_interface
+ nxos_install_os:
+ redirect: cisco.nxos.nxos_install_os
+ nxos_nxapi:
+ redirect: cisco.nxos.nxos_nxapi
+ nxos_l2_interface:
+ redirect: cisco.nxos.nxos_l2_interface
+ nxos_bgp_neighbor_af:
+ redirect: cisco.nxos.nxos_bgp_neighbor_af
+ nxos_lacp:
+ redirect: cisco.nxos.nxos_lacp
+ nxos_lldp_interfaces:
+ redirect: cisco.nxos.nxos_lldp_interfaces
+ nxos_acl_interface:
+ redirect: cisco.nxos.nxos_acl_interface
+ nxos_vrf:
+ redirect: cisco.nxos.nxos_vrf
+ nxos_interface_ospf:
+ redirect: cisco.nxos.nxos_interface_ospf
+ nxos_acls:
+ redirect: cisco.nxos.nxos_acls
+ nxos_vtp_password:
+ redirect: cisco.nxos.nxos_vtp_password
+ nxos_l3_interfaces:
+ redirect: cisco.nxos.nxos_l3_interfaces
+ nxos_igmp_snooping:
+ redirect: cisco.nxos.nxos_igmp_snooping
+ nxos_banner:
+ redirect: cisco.nxos.nxos_banner
+ nxos_bfd_global:
+ redirect: cisco.nxos.nxos_bfd_global
+ nxos_udld:
+ redirect: cisco.nxos.nxos_udld
+ nxos_vtp_domain:
+ redirect: cisco.nxos.nxos_vtp_domain
+ nxos_snmp_host:
+ redirect: cisco.nxos.nxos_snmp_host
+ nxos_l2_interfaces:
+ redirect: cisco.nxos.nxos_l2_interfaces
+ nxos_hsrp:
+ redirect: cisco.nxos.nxos_hsrp
+ nxos_interfaces:
+ redirect: cisco.nxos.nxos_interfaces
+ nxos_overlay_global:
+ redirect: cisco.nxos.nxos_overlay_global
+ nxos_snmp_user:
+ redirect: cisco.nxos.nxos_snmp_user
+ nxos_vlans:
+ redirect: cisco.nxos.nxos_vlans
+ nxos_ntp:
+ redirect: cisco.nxos.nxos_ntp
+ nxos_file_copy:
+ redirect: cisco.nxos.nxos_file_copy
+ nxos_ntp_auth:
+ redirect: cisco.nxos.nxos_ntp_auth
+ nxos_config:
+ redirect: cisco.nxos.nxos_config
+ nxos_vlan:
+ redirect: cisco.nxos.nxos_vlan
+ nxos_facts:
+ redirect: cisco.nxos.nxos_facts
+ nxos_zone_zoneset:
+ redirect: cisco.nxos.nxos_zone_zoneset
+ nxos_vsan:
+ redirect: cisco.nxos.nxos_vsan
+ nxos_devicealias:
+ redirect: cisco.nxos.nxos_devicealias
+ ucs_managed_objects:
+ redirect: cisco.ucs.ucs_managed_objects
+ ucs_vnic_template:
+ redirect: cisco.ucs.ucs_vnic_template
+ ucs_query:
+ redirect: cisco.ucs.ucs_query
+ ucs_dns_server:
+ redirect: cisco.ucs.ucs_dns_server
+ ucs_lan_connectivity:
+ redirect: cisco.ucs.ucs_lan_connectivity
+ ucs_vhba_template:
+ redirect: cisco.ucs.ucs_vhba_template
+ ucs_san_connectivity:
+ redirect: cisco.ucs.ucs_san_connectivity
+ ucs_disk_group_policy:
+ redirect: cisco.ucs.ucs_disk_group_policy
+ ucs_uuid_pool:
+ redirect: cisco.ucs.ucs_uuid_pool
+ ucs_vlan_find:
+ redirect: cisco.ucs.ucs_vlan_find
+ ucs_vlans:
+ redirect: cisco.ucs.ucs_vlans
+ ucs_service_profile_template:
+ redirect: cisco.ucs.ucs_service_profile_template
+ ucs_ip_pool:
+ redirect: cisco.ucs.ucs_ip_pool
+ ucs_timezone:
+ redirect: cisco.ucs.ucs_timezone
+ ucs_ntp_server:
+ redirect: cisco.ucs.ucs_ntp_server
+ ucs_mac_pool:
+ redirect: cisco.ucs.ucs_mac_pool
+ ucs_storage_profile:
+ redirect: cisco.ucs.ucs_storage_profile
+ ucs_org:
+ redirect: cisco.ucs.ucs_org
+ ucs_vsans:
+ redirect: cisco.ucs.ucs_vsans
+ ucs_wwn_pool:
+ redirect: cisco.ucs.ucs_wwn_pool
+ bigip_apm_acl:
+ redirect: f5networks.f5_modules.bigip_apm_acl
+ bigip_apm_network_access:
+ redirect: f5networks.f5_modules.bigip_apm_network_access
+ bigip_apm_policy_fetch:
+ redirect: f5networks.f5_modules.bigip_apm_policy_fetch
+ bigip_apm_policy_import:
+ redirect: f5networks.f5_modules.bigip_apm_policy_import
+ bigip_appsvcs_extension:
+ redirect: f5networks.f5_modules.bigip_appsvcs_extension
+ bigip_asm_dos_application:
+ redirect: f5networks.f5_modules.bigip_asm_dos_application
+ bigip_asm_policy_fetch:
+ redirect: f5networks.f5_modules.bigip_asm_policy_fetch
+ bigip_asm_policy_import:
+ redirect: f5networks.f5_modules.bigip_asm_policy_import
+ bigip_asm_policy_manage:
+ redirect: f5networks.f5_modules.bigip_asm_policy_manage
+ bigip_asm_policy_server_technology:
+ redirect: f5networks.f5_modules.bigip_asm_policy_server_technology
+ bigip_asm_policy_signature_set:
+ redirect: f5networks.f5_modules.bigip_asm_policy_signature_set
+ bigip_cli_alias:
+ redirect: f5networks.f5_modules.bigip_cli_alias
+ bigip_cli_script:
+ redirect: f5networks.f5_modules.bigip_cli_script
+ bigip_command:
+ redirect: f5networks.f5_modules.bigip_command
+ bigip_config:
+ redirect: f5networks.f5_modules.bigip_config
+ bigip_configsync_action:
+ redirect: f5networks.f5_modules.bigip_configsync_action
+ bigip_data_group:
+ redirect: f5networks.f5_modules.bigip_data_group
+ bigip_device_auth:
+ redirect: f5networks.f5_modules.bigip_device_auth
+ bigip_device_auth_ldap:
+ redirect: f5networks.f5_modules.bigip_device_auth_ldap
+ bigip_device_certificate:
+ redirect: f5networks.f5_modules.bigip_device_certificate
+ bigip_device_connectivity:
+ redirect: f5networks.f5_modules.bigip_device_connectivity
+ bigip_device_dns:
+ redirect: f5networks.f5_modules.bigip_device_dns
+ bigip_device_group:
+ redirect: f5networks.f5_modules.bigip_device_group
+ bigip_device_group_member:
+ redirect: f5networks.f5_modules.bigip_device_group_member
+ bigip_device_ha_group:
+ redirect: f5networks.f5_modules.bigip_device_ha_group
+ bigip_device_httpd:
+ redirect: f5networks.f5_modules.bigip_device_httpd
+ bigip_device_info:
+ redirect: f5networks.f5_modules.bigip_device_info
+ bigip_device_license:
+ redirect: f5networks.f5_modules.bigip_device_license
+ bigip_device_ntp:
+ redirect: f5networks.f5_modules.bigip_device_ntp
+ bigip_device_sshd:
+ redirect: f5networks.f5_modules.bigip_device_sshd
+ bigip_device_syslog:
+ redirect: f5networks.f5_modules.bigip_device_syslog
+ bigip_device_traffic_group:
+ redirect: f5networks.f5_modules.bigip_device_traffic_group
+ bigip_device_trust:
+ redirect: f5networks.f5_modules.bigip_device_trust
+ bigip_dns_cache_resolver:
+ redirect: f5networks.f5_modules.bigip_dns_cache_resolver
+ bigip_dns_nameserver:
+ redirect: f5networks.f5_modules.bigip_dns_nameserver
+ bigip_dns_resolver:
+ redirect: f5networks.f5_modules.bigip_dns_resolver
+ bigip_dns_zone:
+ redirect: f5networks.f5_modules.bigip_dns_zone
+ bigip_file_copy:
+ redirect: f5networks.f5_modules.bigip_file_copy
+ bigip_firewall_address_list:
+ redirect: f5networks.f5_modules.bigip_firewall_address_list
+ bigip_firewall_dos_profile:
+ redirect: f5networks.f5_modules.bigip_firewall_dos_profile
+ bigip_firewall_dos_vector:
+ redirect: f5networks.f5_modules.bigip_firewall_dos_vector
+ bigip_firewall_global_rules:
+ redirect: f5networks.f5_modules.bigip_firewall_global_rules
+ bigip_firewall_log_profile:
+ redirect: f5networks.f5_modules.bigip_firewall_log_profile
+ bigip_firewall_log_profile_network:
+ redirect: f5networks.f5_modules.bigip_firewall_log_profile_network
+ bigip_firewall_policy:
+ redirect: f5networks.f5_modules.bigip_firewall_policy
+ bigip_firewall_port_list:
+ redirect: f5networks.f5_modules.bigip_firewall_port_list
+ bigip_firewall_rule:
+ redirect: f5networks.f5_modules.bigip_firewall_rule
+ bigip_firewall_rule_list:
+ redirect: f5networks.f5_modules.bigip_firewall_rule_list
+ bigip_firewall_schedule:
+ redirect: f5networks.f5_modules.bigip_firewall_schedule
+ bigip_gtm_datacenter:
+ redirect: f5networks.f5_modules.bigip_gtm_datacenter
+ bigip_gtm_global:
+ redirect: f5networks.f5_modules.bigip_gtm_global
+ bigip_gtm_monitor_bigip:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_bigip
+ bigip_gtm_monitor_external:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_external
+ bigip_gtm_monitor_firepass:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_firepass
+ bigip_gtm_monitor_http:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_http
+ bigip_gtm_monitor_https:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_https
+ bigip_gtm_monitor_tcp:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_tcp
+ bigip_gtm_monitor_tcp_half_open:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_tcp_half_open
+ bigip_gtm_pool:
+ redirect: f5networks.f5_modules.bigip_gtm_pool
+ bigip_gtm_pool_member:
+ redirect: f5networks.f5_modules.bigip_gtm_pool_member
+ bigip_gtm_server:
+ redirect: f5networks.f5_modules.bigip_gtm_server
+ bigip_gtm_topology_record:
+ redirect: f5networks.f5_modules.bigip_gtm_topology_record
+ bigip_gtm_topology_region:
+ redirect: f5networks.f5_modules.bigip_gtm_topology_region
+ bigip_gtm_virtual_server:
+ redirect: f5networks.f5_modules.bigip_gtm_virtual_server
+ bigip_gtm_wide_ip:
+ redirect: f5networks.f5_modules.bigip_gtm_wide_ip
+ bigip_hostname:
+ redirect: f5networks.f5_modules.bigip_hostname
+ bigip_iapp_service:
+ redirect: f5networks.f5_modules.bigip_iapp_service
+ bigip_iapp_template:
+ redirect: f5networks.f5_modules.bigip_iapp_template
+ bigip_ike_peer:
+ redirect: f5networks.f5_modules.bigip_ike_peer
+ bigip_imish_config:
+ redirect: f5networks.f5_modules.bigip_imish_config
+ bigip_ipsec_policy:
+ redirect: f5networks.f5_modules.bigip_ipsec_policy
+ bigip_irule:
+ redirect: f5networks.f5_modules.bigip_irule
+ bigip_log_destination:
+ redirect: f5networks.f5_modules.bigip_log_destination
+ bigip_log_publisher:
+ redirect: f5networks.f5_modules.bigip_log_publisher
+ bigip_lx_package:
+ redirect: f5networks.f5_modules.bigip_lx_package
+ bigip_management_route:
+ redirect: f5networks.f5_modules.bigip_management_route
+ bigip_message_routing_peer:
+ redirect: f5networks.f5_modules.bigip_message_routing_peer
+ bigip_message_routing_protocol:
+ redirect: f5networks.f5_modules.bigip_message_routing_protocol
+ bigip_message_routing_route:
+ redirect: f5networks.f5_modules.bigip_message_routing_route
+ bigip_message_routing_router:
+ redirect: f5networks.f5_modules.bigip_message_routing_router
+ bigip_message_routing_transport_config:
+ redirect: f5networks.f5_modules.bigip_message_routing_transport_config
+ bigip_monitor_dns:
+ redirect: f5networks.f5_modules.bigip_monitor_dns
+ bigip_monitor_external:
+ redirect: f5networks.f5_modules.bigip_monitor_external
+ bigip_monitor_gateway_icmp:
+ redirect: f5networks.f5_modules.bigip_monitor_gateway_icmp
+ bigip_monitor_http:
+ redirect: f5networks.f5_modules.bigip_monitor_http
+ bigip_monitor_https:
+ redirect: f5networks.f5_modules.bigip_monitor_https
+ bigip_monitor_ldap:
+ redirect: f5networks.f5_modules.bigip_monitor_ldap
+ bigip_monitor_snmp_dca:
+ redirect: f5networks.f5_modules.bigip_monitor_snmp_dca
+ bigip_monitor_tcp:
+ redirect: f5networks.f5_modules.bigip_monitor_tcp
+ bigip_monitor_tcp_echo:
+ redirect: f5networks.f5_modules.bigip_monitor_tcp_echo
+ bigip_monitor_tcp_half_open:
+ redirect: f5networks.f5_modules.bigip_monitor_tcp_half_open
+ bigip_monitor_udp:
+ redirect: f5networks.f5_modules.bigip_monitor_udp
+ bigip_node:
+ redirect: f5networks.f5_modules.bigip_node
+ bigip_partition:
+ redirect: f5networks.f5_modules.bigip_partition
+ bigip_password_policy:
+ redirect: f5networks.f5_modules.bigip_password_policy
+ bigip_policy:
+ redirect: f5networks.f5_modules.bigip_policy
+ bigip_policy_rule:
+ redirect: f5networks.f5_modules.bigip_policy_rule
+ bigip_pool:
+ redirect: f5networks.f5_modules.bigip_pool
+ bigip_pool_member:
+ redirect: f5networks.f5_modules.bigip_pool_member
+ bigip_profile_analytics:
+ redirect: f5networks.f5_modules.bigip_profile_analytics
+ bigip_profile_client_ssl:
+ redirect: f5networks.f5_modules.bigip_profile_client_ssl
+ bigip_profile_dns:
+ redirect: f5networks.f5_modules.bigip_profile_dns
+ bigip_profile_fastl4:
+ redirect: f5networks.f5_modules.bigip_profile_fastl4
+ bigip_profile_http:
+ redirect: f5networks.f5_modules.bigip_profile_http
+ bigip_profile_http2:
+ redirect: f5networks.f5_modules.bigip_profile_http2
+ bigip_profile_http_compression:
+ redirect: f5networks.f5_modules.bigip_profile_http_compression
+ bigip_profile_oneconnect:
+ redirect: f5networks.f5_modules.bigip_profile_oneconnect
+ bigip_profile_persistence_cookie:
+ redirect: f5networks.f5_modules.bigip_profile_persistence_cookie
+ bigip_profile_persistence_src_addr:
+ redirect: f5networks.f5_modules.bigip_profile_persistence_src_addr
+ bigip_profile_server_ssl:
+ redirect: f5networks.f5_modules.bigip_profile_server_ssl
+ bigip_profile_tcp:
+ redirect: f5networks.f5_modules.bigip_profile_tcp
+ bigip_profile_udp:
+ redirect: f5networks.f5_modules.bigip_profile_udp
+ bigip_provision:
+ redirect: f5networks.f5_modules.bigip_provision
+ bigip_qkview:
+ redirect: f5networks.f5_modules.bigip_qkview
+ bigip_remote_role:
+ redirect: f5networks.f5_modules.bigip_remote_role
+ bigip_remote_syslog:
+ redirect: f5networks.f5_modules.bigip_remote_syslog
+ bigip_remote_user:
+ redirect: f5networks.f5_modules.bigip_remote_user
+ bigip_routedomain:
+ redirect: f5networks.f5_modules.bigip_routedomain
+ bigip_selfip:
+ redirect: f5networks.f5_modules.bigip_selfip
+ bigip_service_policy:
+ redirect: f5networks.f5_modules.bigip_service_policy
+ bigip_smtp:
+ redirect: f5networks.f5_modules.bigip_smtp
+ bigip_snat_pool:
+ redirect: f5networks.f5_modules.bigip_snat_pool
+ bigip_snat_translation:
+ redirect: f5networks.f5_modules.bigip_snat_translation
+ bigip_snmp:
+ redirect: f5networks.f5_modules.bigip_snmp
+ bigip_snmp_community:
+ redirect: f5networks.f5_modules.bigip_snmp_community
+ bigip_snmp_trap:
+ redirect: f5networks.f5_modules.bigip_snmp_trap
+ bigip_software_image:
+ redirect: f5networks.f5_modules.bigip_software_image
+ bigip_software_install:
+ redirect: f5networks.f5_modules.bigip_software_install
+ bigip_software_update:
+ redirect: f5networks.f5_modules.bigip_software_update
+ bigip_ssl_certificate:
+ redirect: f5networks.f5_modules.bigip_ssl_certificate
+ bigip_ssl_key:
+ redirect: f5networks.f5_modules.bigip_ssl_key
+ bigip_ssl_ocsp:
+ redirect: f5networks.f5_modules.bigip_ssl_ocsp
+ bigip_static_route:
+ redirect: f5networks.f5_modules.bigip_static_route
+ bigip_sys_daemon_log_tmm:
+ redirect: f5networks.f5_modules.bigip_sys_daemon_log_tmm
+ bigip_sys_db:
+ redirect: f5networks.f5_modules.bigip_sys_db
+ bigip_sys_global:
+ redirect: f5networks.f5_modules.bigip_sys_global
+ bigip_timer_policy:
+ redirect: f5networks.f5_modules.bigip_timer_policy
+ bigip_traffic_selector:
+ redirect: f5networks.f5_modules.bigip_traffic_selector
+ bigip_trunk:
+ redirect: f5networks.f5_modules.bigip_trunk
+ bigip_tunnel:
+ redirect: f5networks.f5_modules.bigip_tunnel
+ bigip_ucs:
+ redirect: f5networks.f5_modules.bigip_ucs
+ bigip_ucs_fetch:
+ redirect: f5networks.f5_modules.bigip_ucs_fetch
+ bigip_user:
+ redirect: f5networks.f5_modules.bigip_user
+ bigip_vcmp_guest:
+ redirect: f5networks.f5_modules.bigip_vcmp_guest
+ bigip_virtual_address:
+ redirect: f5networks.f5_modules.bigip_virtual_address
+ bigip_virtual_server:
+ redirect: f5networks.f5_modules.bigip_virtual_server
+ bigip_vlan:
+ redirect: f5networks.f5_modules.bigip_vlan
+ bigip_wait:
+ redirect: f5networks.f5_modules.bigip_wait
+ bigiq_application_fasthttp:
+ redirect: f5networks.f5_modules.bigiq_application_fasthttp
+ bigiq_application_fastl4_tcp:
+ redirect: f5networks.f5_modules.bigiq_application_fastl4_tcp
+ bigiq_application_fastl4_udp:
+ redirect: f5networks.f5_modules.bigiq_application_fastl4_udp
+ bigiq_application_http:
+ redirect: f5networks.f5_modules.bigiq_application_http
+ bigiq_application_https_offload:
+ redirect: f5networks.f5_modules.bigiq_application_https_offload
+ bigiq_application_https_waf:
+ redirect: f5networks.f5_modules.bigiq_application_https_waf
+ bigiq_device_discovery:
+ redirect: f5networks.f5_modules.bigiq_device_discovery
+ bigiq_device_info:
+ redirect: f5networks.f5_modules.bigiq_device_info
+ bigiq_regkey_license:
+ redirect: f5networks.f5_modules.bigiq_regkey_license
+ bigiq_regkey_license_assignment:
+ redirect: f5networks.f5_modules.bigiq_regkey_license_assignment
+ bigiq_regkey_pool:
+ redirect: f5networks.f5_modules.bigiq_regkey_pool
+ bigiq_utility_license:
+ redirect: f5networks.f5_modules.bigiq_utility_license
+ bigiq_utility_license_assignment:
+ redirect: f5networks.f5_modules.bigiq_utility_license_assignment
+ os_auth:
+ redirect: openstack.cloud.auth
+ os_client_config:
+ redirect: openstack.cloud.config
+ os_coe_cluster:
+ redirect: openstack.cloud.coe_cluster
+ os_coe_cluster_template:
+ redirect: openstack.cloud.coe_cluster_template
+ os_flavor_info:
+ redirect: openstack.cloud.compute_flavor_info
+ os_floating_ip:
+ redirect: openstack.cloud.floating_ip
+ os_group:
+ redirect: openstack.cloud.identity_group
+ os_group_info:
+ redirect: openstack.cloud.identity_group_info
+ os_image:
+ redirect: openstack.cloud.image
+ os_image_info:
+ redirect: openstack.cloud.image_info
+ os_ironic:
+ redirect: openstack.cloud.baremetal_node
+ os_ironic_inspect:
+ redirect: openstack.cloud.baremetal_inspect
+ os_ironic_node:
+ redirect: openstack.cloud.baremetal_node_action
+ os_keypair:
+ redirect: openstack.cloud.keypair
+ os_keystone_domain:
+ redirect: openstack.cloud.identity_domain
+ os_keystone_domain_info:
+ redirect: openstack.cloud.identity_domain_info
+ os_keystone_endpoint:
+ redirect: openstack.cloud.endpoint
+ os_keystone_role:
+ redirect: openstack.cloud.identity_role
+ os_keystone_service:
+ redirect: openstack.cloud.catalog_service
+ os_listener:
+ redirect: openstack.cloud.lb_listener
+ os_loadbalancer:
+ redirect: openstack.cloud.loadbalancer
+ os_member:
+ redirect: openstack.cloud.lb_member
+ os_network:
+ redirect: openstack.cloud.network
+ os_networks_info:
+ redirect: openstack.cloud.networks_info
+ os_nova_flavor:
+ redirect: openstack.cloud.compute_flavor
+ os_nova_host_aggregate:
+ redirect: openstack.cloud.host_aggregate
+ os_object:
+ redirect: openstack.cloud.object
+ os_pool:
+ redirect: openstack.cloud.lb_pool
+ os_port:
+ redirect: openstack.cloud.port
+ os_port_info:
+ redirect: openstack.cloud.port_info
+ os_project:
+ redirect: openstack.cloud.project
+ os_project_access:
+ redirect: openstack.cloud.project_access
+ os_project_info:
+ redirect: openstack.cloud.project_info
+ os_quota:
+ redirect: openstack.cloud.quota
+ os_recordset:
+ redirect: openstack.cloud.recordset
+ os_router:
+ redirect: openstack.cloud.router
+ os_security_group:
+ redirect: openstack.cloud.security_group
+ os_security_group_rule:
+ redirect: openstack.cloud.security_group_rule
+ os_server:
+ redirect: openstack.cloud.server
+ os_server_action:
+ redirect: openstack.cloud.server_action
+ os_server_group:
+ redirect: openstack.cloud.server_group
+ os_server_info:
+ redirect: openstack.cloud.server_info
+ os_server_metadata:
+ redirect: openstack.cloud.server_metadata
+ os_server_volume:
+ redirect: openstack.cloud.server_volume
+ os_stack:
+ redirect: openstack.cloud.stack
+ os_subnet:
+ redirect: openstack.cloud.subnet
+ os_subnets_info:
+ redirect: openstack.cloud.subnets_info
+ os_user:
+ redirect: openstack.cloud.identity_user
+ os_user_group:
+ redirect: openstack.cloud.group_assignment
+ os_user_info:
+ redirect: openstack.cloud.identity_user_info
+ os_user_role:
+ redirect: openstack.cloud.role_assignment
+ os_volume:
+ redirect: openstack.cloud.volume
+ os_volume_snapshot:
+ redirect: openstack.cloud.volume_snapshot
+ os_zone:
+ redirect: openstack.cloud.dns_zone
+ junos_acls:
+ redirect: junipernetworks.junos.junos_acls
+ junos_acl_interfaces:
+ redirect: junipernetworks.junos.junos_acl_interfaces
+ junos_ospfv2:
+ redirect: junipernetworks.junos.junos_ospfv2
+ junos_user:
+ redirect: junipernetworks.junos.junos_user
+ junos_l2_interface:
+ redirect: junipernetworks.junos.junos_l2_interface
+ junos_lldp:
+ redirect: junipernetworks.junos.junos_lldp
+ junos_rpc:
+ redirect: junipernetworks.junos.junos_rpc
+ junos_l2_interfaces:
+ redirect: junipernetworks.junos.junos_l2_interfaces
+ junos_lldp_interface:
+ redirect: junipernetworks.junos.junos_lldp_interface
+ junos_static_route:
+ redirect: junipernetworks.junos.junos_static_route
+ junos_lacp:
+ redirect: junipernetworks.junos.junos_lacp
+ junos_lacp_interfaces:
+ redirect: junipernetworks.junos.junos_lacp_interfaces
+ junos_vlans:
+ redirect: junipernetworks.junos.junos_vlans
+ junos_linkagg:
+ redirect: junipernetworks.junos.junos_linkagg
+ junos_scp:
+ redirect: junipernetworks.junos.junos_scp
+ junos_banner:
+ redirect: junipernetworks.junos.junos_banner
+ junos_l3_interface:
+ redirect: junipernetworks.junos.junos_l3_interface
+ junos_logging:
+ redirect: junipernetworks.junos.junos_logging
+ junos_package:
+ redirect: junipernetworks.junos.junos_package
+ junos_netconf:
+ redirect: junipernetworks.junos.junos_netconf
+ junos_facts:
+ redirect: junipernetworks.junos.junos_facts
+ junos_ping:
+ redirect: junipernetworks.junos.junos_ping
+ junos_interface:
+ redirect: junipernetworks.junos.junos_interface
+ junos_lldp_global:
+ redirect: junipernetworks.junos.junos_lldp_global
+ junos_config:
+ redirect: junipernetworks.junos.junos_config
+ junos_static_routes:
+ redirect: junipernetworks.junos.junos_static_routes
+ junos_command:
+ redirect: junipernetworks.junos.junos_command
+ junos_lag_interfaces:
+ redirect: junipernetworks.junos.junos_lag_interfaces
+ junos_l3_interfaces:
+ redirect: junipernetworks.junos.junos_l3_interfaces
+ junos_lldp_interfaces:
+ redirect: junipernetworks.junos.junos_lldp_interfaces
+ junos_vlan:
+ redirect: junipernetworks.junos.junos_vlan
+ junos_system:
+ redirect: junipernetworks.junos.junos_system
+ junos_interfaces:
+ redirect: junipernetworks.junos.junos_interfaces
+ junos_vrf:
+ redirect: junipernetworks.junos.junos_vrf
+ tower_credential:
+ redirect: awx.awx.tower_credential
+ tower_credential_type:
+ redirect: awx.awx.tower_credential_type
+ tower_group:
+ redirect: awx.awx.tower_group
+ tower_host:
+ redirect: awx.awx.tower_host
+ tower_inventory:
+ redirect: awx.awx.tower_inventory
+ tower_inventory_source:
+ redirect: awx.awx.tower_inventory_source
+ tower_job_cancel:
+ redirect: awx.awx.tower_job_cancel
+ tower_job_launch:
+ redirect: awx.awx.tower_job_launch
+ tower_job_list:
+ redirect: awx.awx.tower_job_list
+ tower_job_template:
+ redirect: awx.awx.tower_job_template
+ tower_job_wait:
+ redirect: awx.awx.tower_job_wait
+ tower_label:
+ redirect: awx.awx.tower_label
+ tower_notification:
+ redirect: awx.awx.tower_notification
+ tower_organization:
+ redirect: awx.awx.tower_organization
+ tower_project:
+ redirect: awx.awx.tower_project
+ tower_receive:
+ redirect: awx.awx.tower_receive
+ tower_role:
+ redirect: awx.awx.tower_role
+ tower_send:
+ redirect: awx.awx.tower_send
+ tower_settings:
+ redirect: awx.awx.tower_settings
+ tower_team:
+ redirect: awx.awx.tower_team
+ tower_user:
+ redirect: awx.awx.tower_user
+ tower_workflow_launch:
+ redirect: awx.awx.tower_workflow_launch
+ tower_workflow_template:
+ redirect: awx.awx.tower_workflow_template
+ ovirt_affinity_group:
+ redirect: ovirt.ovirt.ovirt_affinity_group
+ ovirt_affinity_label:
+ redirect: ovirt.ovirt.ovirt_affinity_label
+ ovirt_affinity_label_info:
+ redirect: ovirt.ovirt.ovirt_affinity_label_info
+ ovirt_api_info:
+ redirect: ovirt.ovirt.ovirt_api_info
+ ovirt_auth:
+ redirect: ovirt.ovirt.ovirt_auth
+ ovirt_cluster:
+ redirect: ovirt.ovirt.ovirt_cluster
+ ovirt_cluster_info:
+ redirect: ovirt.ovirt.ovirt_cluster_info
+ ovirt_datacenter:
+ redirect: ovirt.ovirt.ovirt_datacenter
+ ovirt_datacenter_info:
+ redirect: ovirt.ovirt.ovirt_datacenter_info
+ ovirt_disk:
+ redirect: ovirt.ovirt.ovirt_disk
+ ovirt_disk_info:
+ redirect: ovirt.ovirt.ovirt_disk_info
+ ovirt_event:
+ redirect: ovirt.ovirt.ovirt_event
+ ovirt_event_info:
+ redirect: ovirt.ovirt.ovirt_event_info
+ ovirt_external_provider:
+ redirect: ovirt.ovirt.ovirt_external_provider
+ ovirt_external_provider_info:
+ redirect: ovirt.ovirt.ovirt_external_provider_info
+ ovirt_group:
+ redirect: ovirt.ovirt.ovirt_group
+ ovirt_group_info:
+ redirect: ovirt.ovirt.ovirt_group_info
+ ovirt_host:
+ redirect: ovirt.ovirt.ovirt_host
+ ovirt_host_info:
+ redirect: ovirt.ovirt.ovirt_host_info
+ ovirt_host_network:
+ redirect: ovirt.ovirt.ovirt_host_network
+ ovirt_host_pm:
+ redirect: ovirt.ovirt.ovirt_host_pm
+ ovirt_host_storage_info:
+ redirect: ovirt.ovirt.ovirt_host_storage_info
+ ovirt_instance_type:
+ redirect: ovirt.ovirt.ovirt_instance_type
+ ovirt_job:
+ redirect: ovirt.ovirt.ovirt_job
+ ovirt_mac_pool:
+ redirect: ovirt.ovirt.ovirt_mac_pool
+ ovirt_network:
+ redirect: ovirt.ovirt.ovirt_network
+ ovirt_network_info:
+ redirect: ovirt.ovirt.ovirt_network_info
+ ovirt_nic:
+ redirect: ovirt.ovirt.ovirt_nic
+ ovirt_nic_info:
+ redirect: ovirt.ovirt.ovirt_nic_info
+ ovirt_permission:
+ redirect: ovirt.ovirt.ovirt_permission
+ ovirt_permission_info:
+ redirect: ovirt.ovirt.ovirt_permission_info
+ ovirt_quota:
+ redirect: ovirt.ovirt.ovirt_quota
+ ovirt_quota_info:
+ redirect: ovirt.ovirt.ovirt_quota_info
+ ovirt_role:
+ redirect: ovirt.ovirt.ovirt_role
+ ovirt_scheduling_policy_info:
+ redirect: ovirt.ovirt.ovirt_scheduling_policy_info
+ ovirt_snapshot:
+ redirect: ovirt.ovirt.ovirt_snapshot
+ ovirt_snapshot_info:
+ redirect: ovirt.ovirt.ovirt_snapshot_info
+ ovirt_storage_connection:
+ redirect: ovirt.ovirt.ovirt_storage_connection
+ ovirt_storage_domain:
+ redirect: ovirt.ovirt.ovirt_storage_domain
+ ovirt_storage_domain_info:
+ redirect: ovirt.ovirt.ovirt_storage_domain_info
+ ovirt_storage_template_info:
+ redirect: ovirt.ovirt.ovirt_storage_template_info
+ ovirt_storage_vm_info:
+ redirect: ovirt.ovirt.ovirt_storage_vm_info
+ ovirt_tag:
+ redirect: ovirt.ovirt.ovirt_tag
+ ovirt_tag_info:
+ redirect: ovirt.ovirt.ovirt_tag_info
+ ovirt_template:
+ redirect: ovirt.ovirt.ovirt_template
+ ovirt_template_info:
+ redirect: ovirt.ovirt.ovirt_template_info
+ ovirt_user:
+ redirect: ovirt.ovirt.ovirt_user
+ ovirt_user_info:
+ redirect: ovirt.ovirt.ovirt_user_info
+ ovirt_vm:
+ redirect: ovirt.ovirt.ovirt_vm
+ ovirt_vm_info:
+ redirect: ovirt.ovirt.ovirt_vm_info
+ ovirt_vmpool:
+ redirect: ovirt.ovirt.ovirt_vmpool
+ ovirt_vmpool_info:
+ redirect: ovirt.ovirt.ovirt_vmpool_info
+ ovirt_vnic_profile:
+ redirect: ovirt.ovirt.ovirt_vnic_profile
+ ovirt_vnic_profile_info:
+ redirect: ovirt.ovirt.ovirt_vnic_profile_info
+ dellos10_command:
+ redirect: dellemc.os10.os10_command
+ dellos10_facts:
+ redirect: dellemc.os10.os10_facts
+ dellos10_config:
+ redirect: dellemc.os10.os10_config
+ dellos9_facts:
+ redirect: dellemc.os9.os9_facts
+ dellos9_command:
+ redirect: dellemc.os9.os9_command
+ dellos9_config:
+ redirect: dellemc.os9.os9_config
+ dellos6_facts:
+ redirect: dellemc.os6.os6_facts
+ dellos6_config:
+ redirect: dellemc.os6.os6_config
+ dellos6_command:
+ redirect: dellemc.os6.os6_command
+ hcloud_location_facts:
+ redirect: hetzner.hcloud.hcloud_location_facts
+ hcloud_server_info:
+ redirect: hetzner.hcloud.hcloud_server_info
+ hcloud_server_network:
+ redirect: hetzner.hcloud.hcloud_server_network
+ hcloud_server_type_info:
+ redirect: hetzner.hcloud.hcloud_server_type_info
+ hcloud_route:
+ redirect: hetzner.hcloud.hcloud_route
+ hcloud_server:
+ redirect: hetzner.hcloud.hcloud_server
+ hcloud_volume_info:
+ redirect: hetzner.hcloud.hcloud_volume_info
+ hcloud_server_type_facts:
+ redirect: hetzner.hcloud.hcloud_server_type_facts
+ hcloud_ssh_key_info:
+ redirect: hetzner.hcloud.hcloud_ssh_key_info
+ hcloud_network_info:
+ redirect: hetzner.hcloud.hcloud_network_info
+ hcloud_datacenter_info:
+ redirect: hetzner.hcloud.hcloud_datacenter_info
+ hcloud_image_facts:
+ redirect: hetzner.hcloud.hcloud_image_facts
+ hcloud_volume_facts:
+ redirect: hetzner.hcloud.hcloud_volume_facts
+ hcloud_floating_ip_info:
+ redirect: hetzner.hcloud.hcloud_floating_ip_info
+ hcloud_floating_ip_facts:
+ redirect: hetzner.hcloud.hcloud_floating_ip_facts
+ hcloud_image_info:
+ redirect: hetzner.hcloud.hcloud_image_info
+ hcloud_ssh_key_facts:
+ redirect: hetzner.hcloud.hcloud_ssh_key_facts
+ hcloud_location_info:
+ redirect: hetzner.hcloud.hcloud_location_info
+ hcloud_network:
+ redirect: hetzner.hcloud.hcloud_network
+ hcloud_volume:
+ redirect: hetzner.hcloud.hcloud_volume
+ hcloud_ssh_key:
+ redirect: hetzner.hcloud.hcloud_ssh_key
+ hcloud_datacenter_facts:
+ redirect: hetzner.hcloud.hcloud_datacenter_facts
+ hcloud_rdns:
+ redirect: hetzner.hcloud.hcloud_rdns
+ hcloud_floating_ip:
+ redirect: hetzner.hcloud.hcloud_floating_ip
+ hcloud_server_facts:
+ redirect: hetzner.hcloud.hcloud_server_facts
+ hcloud_subnetwork:
+ redirect: hetzner.hcloud.hcloud_subnetwork
+ skydive_capture:
+ redirect: community.skydive.skydive_capture
+ skydive_edge:
+ redirect: community.skydive.skydive_edge
+ skydive_node:
+ redirect: community.skydive.skydive_node
+ cyberark_authentication:
+ redirect: cyberark.pas.cyberark_authentication
+ cyberark_user:
+ redirect: cyberark.pas.cyberark_user
+ gcp_appengine_firewall_rule:
+ redirect: google.cloud.gcp_appengine_firewall_rule
+ gcp_appengine_firewall_rule_info:
+ redirect: google.cloud.gcp_appengine_firewall_rule_info
+ gcp_bigquery_dataset:
+ redirect: google.cloud.gcp_bigquery_dataset
+ gcp_bigquery_dataset_info:
+ redirect: google.cloud.gcp_bigquery_dataset_info
+ gcp_bigquery_table:
+ redirect: google.cloud.gcp_bigquery_table
+ gcp_bigquery_table_info:
+ redirect: google.cloud.gcp_bigquery_table_info
+ gcp_cloudbuild_trigger:
+ redirect: google.cloud.gcp_cloudbuild_trigger
+ gcp_cloudbuild_trigger_info:
+ redirect: google.cloud.gcp_cloudbuild_trigger_info
+ gcp_cloudfunctions_cloud_function:
+ redirect: google.cloud.gcp_cloudfunctions_cloud_function
+ gcp_cloudfunctions_cloud_function_info:
+ redirect: google.cloud.gcp_cloudfunctions_cloud_function_info
+ gcp_cloudscheduler_job:
+ redirect: google.cloud.gcp_cloudscheduler_job
+ gcp_cloudscheduler_job_info:
+ redirect: google.cloud.gcp_cloudscheduler_job_info
+ gcp_cloudtasks_queue:
+ redirect: google.cloud.gcp_cloudtasks_queue
+ gcp_cloudtasks_queue_info:
+ redirect: google.cloud.gcp_cloudtasks_queue_info
+ gcp_compute_address:
+ redirect: google.cloud.gcp_compute_address
+ gcp_compute_address_info:
+ redirect: google.cloud.gcp_compute_address_info
+ gcp_compute_autoscaler:
+ redirect: google.cloud.gcp_compute_autoscaler
+ gcp_compute_autoscaler_info:
+ redirect: google.cloud.gcp_compute_autoscaler_info
+ gcp_compute_backend_bucket:
+ redirect: google.cloud.gcp_compute_backend_bucket
+ gcp_compute_backend_bucket_info:
+ redirect: google.cloud.gcp_compute_backend_bucket_info
+ gcp_compute_backend_service:
+ redirect: google.cloud.gcp_compute_backend_service
+ gcp_compute_backend_service_info:
+ redirect: google.cloud.gcp_compute_backend_service_info
+ gcp_compute_disk:
+ redirect: google.cloud.gcp_compute_disk
+ gcp_compute_disk_info:
+ redirect: google.cloud.gcp_compute_disk_info
+ gcp_compute_firewall:
+ redirect: google.cloud.gcp_compute_firewall
+ gcp_compute_firewall_info:
+ redirect: google.cloud.gcp_compute_firewall_info
+ gcp_compute_forwarding_rule:
+ redirect: google.cloud.gcp_compute_forwarding_rule
+ gcp_compute_forwarding_rule_info:
+ redirect: google.cloud.gcp_compute_forwarding_rule_info
+ gcp_compute_global_address:
+ redirect: google.cloud.gcp_compute_global_address
+ gcp_compute_global_address_info:
+ redirect: google.cloud.gcp_compute_global_address_info
+ gcp_compute_global_forwarding_rule:
+ redirect: google.cloud.gcp_compute_global_forwarding_rule
+ gcp_compute_global_forwarding_rule_info:
+ redirect: google.cloud.gcp_compute_global_forwarding_rule_info
+ gcp_compute_health_check:
+ redirect: google.cloud.gcp_compute_health_check
+ gcp_compute_health_check_info:
+ redirect: google.cloud.gcp_compute_health_check_info
+ gcp_compute_http_health_check:
+ redirect: google.cloud.gcp_compute_http_health_check
+ gcp_compute_http_health_check_info:
+ redirect: google.cloud.gcp_compute_http_health_check_info
+ gcp_compute_https_health_check:
+ redirect: google.cloud.gcp_compute_https_health_check
+ gcp_compute_https_health_check_info:
+ redirect: google.cloud.gcp_compute_https_health_check_info
+ gcp_compute_image:
+ redirect: google.cloud.gcp_compute_image
+ gcp_compute_image_info:
+ redirect: google.cloud.gcp_compute_image_info
+ gcp_compute_instance:
+ redirect: google.cloud.gcp_compute_instance
+ gcp_compute_instance_group:
+ redirect: google.cloud.gcp_compute_instance_group
+ gcp_compute_instance_group_info:
+ redirect: google.cloud.gcp_compute_instance_group_info
+ gcp_compute_instance_group_manager:
+ redirect: google.cloud.gcp_compute_instance_group_manager
+ gcp_compute_instance_group_manager_info:
+ redirect: google.cloud.gcp_compute_instance_group_manager_info
+ gcp_compute_instance_info:
+ redirect: google.cloud.gcp_compute_instance_info
+ gcp_compute_instance_template:
+ redirect: google.cloud.gcp_compute_instance_template
+ gcp_compute_instance_template_info:
+ redirect: google.cloud.gcp_compute_instance_template_info
+ gcp_compute_interconnect_attachment:
+ redirect: google.cloud.gcp_compute_interconnect_attachment
+ gcp_compute_interconnect_attachment_info:
+ redirect: google.cloud.gcp_compute_interconnect_attachment_info
+ gcp_compute_network:
+ redirect: google.cloud.gcp_compute_network
+ gcp_compute_network_endpoint_group:
+ redirect: google.cloud.gcp_compute_network_endpoint_group
+ gcp_compute_network_endpoint_group_info:
+ redirect: google.cloud.gcp_compute_network_endpoint_group_info
+ gcp_compute_network_info:
+ redirect: google.cloud.gcp_compute_network_info
+ gcp_compute_node_group:
+ redirect: google.cloud.gcp_compute_node_group
+ gcp_compute_node_group_info:
+ redirect: google.cloud.gcp_compute_node_group_info
+ gcp_compute_node_template:
+ redirect: google.cloud.gcp_compute_node_template
+ gcp_compute_node_template_info:
+ redirect: google.cloud.gcp_compute_node_template_info
+ gcp_compute_region_backend_service:
+ redirect: google.cloud.gcp_compute_region_backend_service
+ gcp_compute_region_backend_service_info:
+ redirect: google.cloud.gcp_compute_region_backend_service_info
+ gcp_compute_region_disk:
+ redirect: google.cloud.gcp_compute_region_disk
+ gcp_compute_region_disk_info:
+ redirect: google.cloud.gcp_compute_region_disk_info
+ gcp_compute_reservation:
+ redirect: google.cloud.gcp_compute_reservation
+ gcp_compute_reservation_info:
+ redirect: google.cloud.gcp_compute_reservation_info
+ gcp_compute_route:
+ redirect: google.cloud.gcp_compute_route
+ gcp_compute_route_info:
+ redirect: google.cloud.gcp_compute_route_info
+ gcp_compute_router:
+ redirect: google.cloud.gcp_compute_router
+ gcp_compute_router_info:
+ redirect: google.cloud.gcp_compute_router_info
+ gcp_compute_snapshot:
+ redirect: google.cloud.gcp_compute_snapshot
+ gcp_compute_snapshot_info:
+ redirect: google.cloud.gcp_compute_snapshot_info
+ gcp_compute_ssl_certificate:
+ redirect: google.cloud.gcp_compute_ssl_certificate
+ gcp_compute_ssl_certificate_info:
+ redirect: google.cloud.gcp_compute_ssl_certificate_info
+ gcp_compute_ssl_policy:
+ redirect: google.cloud.gcp_compute_ssl_policy
+ gcp_compute_ssl_policy_info:
+ redirect: google.cloud.gcp_compute_ssl_policy_info
+ gcp_compute_subnetwork:
+ redirect: google.cloud.gcp_compute_subnetwork
+ gcp_compute_subnetwork_info:
+ redirect: google.cloud.gcp_compute_subnetwork_info
+ gcp_compute_target_http_proxy:
+ redirect: google.cloud.gcp_compute_target_http_proxy
+ gcp_compute_target_http_proxy_info:
+ redirect: google.cloud.gcp_compute_target_http_proxy_info
+ gcp_compute_target_https_proxy:
+ redirect: google.cloud.gcp_compute_target_https_proxy
+ gcp_compute_target_https_proxy_info:
+ redirect: google.cloud.gcp_compute_target_https_proxy_info
+ gcp_compute_target_instance:
+ redirect: google.cloud.gcp_compute_target_instance
+ gcp_compute_target_instance_info:
+ redirect: google.cloud.gcp_compute_target_instance_info
+ gcp_compute_target_pool:
+ redirect: google.cloud.gcp_compute_target_pool
+ gcp_compute_target_pool_info:
+ redirect: google.cloud.gcp_compute_target_pool_info
+ gcp_compute_target_ssl_proxy:
+ redirect: google.cloud.gcp_compute_target_ssl_proxy
+ gcp_compute_target_ssl_proxy_info:
+ redirect: google.cloud.gcp_compute_target_ssl_proxy_info
+ gcp_compute_target_tcp_proxy:
+ redirect: google.cloud.gcp_compute_target_tcp_proxy
+ gcp_compute_target_tcp_proxy_info:
+ redirect: google.cloud.gcp_compute_target_tcp_proxy_info
+ gcp_compute_target_vpn_gateway:
+ redirect: google.cloud.gcp_compute_target_vpn_gateway
+ gcp_compute_target_vpn_gateway_info:
+ redirect: google.cloud.gcp_compute_target_vpn_gateway_info
+ gcp_compute_url_map:
+ redirect: google.cloud.gcp_compute_url_map
+ gcp_compute_url_map_info:
+ redirect: google.cloud.gcp_compute_url_map_info
+ gcp_compute_vpn_tunnel:
+ redirect: google.cloud.gcp_compute_vpn_tunnel
+ gcp_compute_vpn_tunnel_info:
+ redirect: google.cloud.gcp_compute_vpn_tunnel_info
+ gcp_container_cluster:
+ redirect: google.cloud.gcp_container_cluster
+ gcp_container_cluster_info:
+ redirect: google.cloud.gcp_container_cluster_info
+ gcp_container_node_pool:
+ redirect: google.cloud.gcp_container_node_pool
+ gcp_container_node_pool_info:
+ redirect: google.cloud.gcp_container_node_pool_info
+ gcp_dns_managed_zone:
+ redirect: google.cloud.gcp_dns_managed_zone
+ gcp_dns_managed_zone_info:
+ redirect: google.cloud.gcp_dns_managed_zone_info
+ gcp_dns_resource_record_set:
+ redirect: google.cloud.gcp_dns_resource_record_set
+ gcp_dns_resource_record_set_info:
+ redirect: google.cloud.gcp_dns_resource_record_set_info
+ gcp_filestore_instance:
+ redirect: google.cloud.gcp_filestore_instance
+ gcp_filestore_instance_info:
+ redirect: google.cloud.gcp_filestore_instance_info
+ gcp_iam_role:
+ redirect: google.cloud.gcp_iam_role
+ gcp_iam_role_info:
+ redirect: google.cloud.gcp_iam_role_info
+ gcp_iam_service_account:
+ redirect: google.cloud.gcp_iam_service_account
+ gcp_iam_service_account_info:
+ redirect: google.cloud.gcp_iam_service_account_info
+ gcp_iam_service_account_key:
+ redirect: google.cloud.gcp_iam_service_account_key
+ gcp_kms_crypto_key:
+ redirect: google.cloud.gcp_kms_crypto_key
+ gcp_kms_crypto_key_info:
+ redirect: google.cloud.gcp_kms_crypto_key_info
+ gcp_kms_key_ring:
+ redirect: google.cloud.gcp_kms_key_ring
+ gcp_kms_key_ring_info:
+ redirect: google.cloud.gcp_kms_key_ring_info
+ gcp_logging_metric:
+ redirect: google.cloud.gcp_logging_metric
+ gcp_logging_metric_info:
+ redirect: google.cloud.gcp_logging_metric_info
+ gcp_mlengine_model:
+ redirect: google.cloud.gcp_mlengine_model
+ gcp_mlengine_model_info:
+ redirect: google.cloud.gcp_mlengine_model_info
+ gcp_mlengine_version:
+ redirect: google.cloud.gcp_mlengine_version
+ gcp_mlengine_version_info:
+ redirect: google.cloud.gcp_mlengine_version_info
+ gcp_pubsub_subscription:
+ redirect: google.cloud.gcp_pubsub_subscription
+ gcp_pubsub_subscription_info:
+ redirect: google.cloud.gcp_pubsub_subscription_info
+ gcp_pubsub_topic:
+ redirect: google.cloud.gcp_pubsub_topic
+ gcp_pubsub_topic_info:
+ redirect: google.cloud.gcp_pubsub_topic_info
+ gcp_redis_instance:
+ redirect: google.cloud.gcp_redis_instance
+ gcp_redis_instance_info:
+ redirect: google.cloud.gcp_redis_instance_info
+ gcp_resourcemanager_project:
+ redirect: google.cloud.gcp_resourcemanager_project
+ gcp_resourcemanager_project_info:
+ redirect: google.cloud.gcp_resourcemanager_project_info
+ gcp_runtimeconfig_config:
+ redirect: google.cloud.gcp_runtimeconfig_config
+ gcp_runtimeconfig_config_info:
+ redirect: google.cloud.gcp_runtimeconfig_config_info
+ gcp_runtimeconfig_variable:
+ redirect: google.cloud.gcp_runtimeconfig_variable
+ gcp_runtimeconfig_variable_info:
+ redirect: google.cloud.gcp_runtimeconfig_variable_info
+ gcp_serviceusage_service:
+ redirect: google.cloud.gcp_serviceusage_service
+ gcp_serviceusage_service_info:
+ redirect: google.cloud.gcp_serviceusage_service_info
+ gcp_sourcerepo_repository:
+ redirect: google.cloud.gcp_sourcerepo_repository
+ gcp_sourcerepo_repository_info:
+ redirect: google.cloud.gcp_sourcerepo_repository_info
+ gcp_spanner_database:
+ redirect: google.cloud.gcp_spanner_database
+ gcp_spanner_database_info:
+ redirect: google.cloud.gcp_spanner_database_info
+ gcp_spanner_instance:
+ redirect: google.cloud.gcp_spanner_instance
+ gcp_spanner_instance_info:
+ redirect: google.cloud.gcp_spanner_instance_info
+ gcp_sql_database:
+ redirect: google.cloud.gcp_sql_database
+ gcp_sql_database_info:
+ redirect: google.cloud.gcp_sql_database_info
+ gcp_sql_instance:
+ redirect: google.cloud.gcp_sql_instance
+ gcp_sql_instance_info:
+ redirect: google.cloud.gcp_sql_instance_info
+ gcp_sql_user:
+ redirect: google.cloud.gcp_sql_user
+ gcp_sql_user_info:
+ redirect: google.cloud.gcp_sql_user_info
+ gcp_storage_bucket:
+ redirect: google.cloud.gcp_storage_bucket
+ gcp_storage_bucket_access_control:
+ redirect: google.cloud.gcp_storage_bucket_access_control
+ gcp_storage_object:
+ redirect: google.cloud.gcp_storage_object
+ gcp_tpu_node:
+ redirect: google.cloud.gcp_tpu_node
+ gcp_tpu_node_info:
+ redirect: google.cloud.gcp_tpu_node_info
+ purefa_alert:
+ redirect: purestorage.flasharray.purefa_alert
+ purefa_arrayname:
+ redirect: purestorage.flasharray.purefa_arrayname
+ purefa_banner:
+ redirect: purestorage.flasharray.purefa_banner
+ purefa_connect:
+ redirect: purestorage.flasharray.purefa_connect
+ purefa_dns:
+ redirect: purestorage.flasharray.purefa_dns
+ purefa_ds:
+ redirect: purestorage.flasharray.purefa_ds
+ purefa_dsrole:
+ redirect: purestorage.flasharray.purefa_dsrole
+ purefa_hg:
+ redirect: purestorage.flasharray.purefa_hg
+ purefa_host:
+ redirect: purestorage.flasharray.purefa_host
+ purefa_info:
+ redirect: purestorage.flasharray.purefa_info
+ purefa_ntp:
+ redirect: purestorage.flasharray.purefa_ntp
+ purefa_offload:
+ redirect: purestorage.flasharray.purefa_offload
+ purefa_pg:
+ redirect: purestorage.flasharray.purefa_pg
+ purefa_pgsnap:
+ redirect: purestorage.flasharray.purefa_pgsnap
+ purefa_phonehome:
+ redirect: purestorage.flasharray.purefa_phonehome
+ purefa_ra:
+ redirect: purestorage.flasharray.purefa_ra
+ purefa_smtp:
+ redirect: purestorage.flasharray.purefa_smtp
+ purefa_snap:
+ redirect: purestorage.flasharray.purefa_snap
+ purefa_snmp:
+ redirect: purestorage.flasharray.purefa_snmp
+ purefa_syslog:
+ redirect: purestorage.flasharray.purefa_syslog
+ purefa_user:
+ redirect: purestorage.flasharray.purefa_user
+ purefa_vg:
+ redirect: purestorage.flasharray.purefa_vg
+ purefa_volume:
+ redirect: purestorage.flasharray.purefa_volume
+ purefb_bucket:
+ redirect: purestorage.flashblade.purefb_bucket
+ purefb_ds:
+ redirect: purestorage.flashblade.purefb_ds
+ purefb_dsrole:
+ redirect: purestorage.flashblade.purefb_dsrole
+ purefb_fs:
+ redirect: purestorage.flashblade.purefb_fs
+ purefb_info:
+ redirect: purestorage.flashblade.purefb_info
+ purefb_network:
+ redirect: purestorage.flashblade.purefb_network
+ purefb_ra:
+ redirect: purestorage.flashblade.purefb_ra
+ purefb_s3acc:
+ redirect: purestorage.flashblade.purefb_s3acc
+ purefb_s3user:
+ redirect: purestorage.flashblade.purefb_s3user
+ purefb_smtp:
+ redirect: purestorage.flashblade.purefb_smtp
+ purefb_snap:
+ redirect: purestorage.flashblade.purefb_snap
+ purefb_subnet:
+ redirect: purestorage.flashblade.purefb_subnet
+ azure_rm_acs:
+ redirect: azure.azcollection.azure_rm_acs
+ azure_rm_virtualmachine_info:
+ redirect: azure.azcollection.azure_rm_virtualmachine_info
+ azure_rm_dnsrecordset_info:
+ redirect: azure.azcollection.azure_rm_dnsrecordset_info
+ azure_rm_dnszone_info:
+ redirect: azure.azcollection.azure_rm_dnszone_info
+ azure_rm_networkinterface_info:
+ redirect: azure.azcollection.azure_rm_networkinterface_info
+ azure_rm_publicipaddress_info:
+ redirect: azure.azcollection.azure_rm_publicipaddress_info
+ azure_rm_securitygroup_info:
+ redirect: azure.azcollection.azure_rm_securitygroup_info
+ azure_rm_storageaccount_info:
+ redirect: azure.azcollection.azure_rm_storageaccount_info
+ azure_rm_virtualnetwork_info:
+ redirect: azure.azcollection.azure_rm_virtualnetwork_info
+ azure_rm_deployment:
+ redirect: azure.azcollection.azure_rm_deployment
+ azure_rm_dnsrecordset:
+ redirect: azure.azcollection.azure_rm_dnsrecordset
+ azure_rm_dnszone:
+ redirect: azure.azcollection.azure_rm_dnszone
+ azure_rm_networkinterface:
+ redirect: azure.azcollection.azure_rm_networkinterface
+ azure_rm_publicipaddress:
+ redirect: azure.azcollection.azure_rm_publicipaddress
+ azure_rm_securitygroup:
+ redirect: azure.azcollection.azure_rm_securitygroup
+ azure_rm_storageaccount:
+ redirect: azure.azcollection.azure_rm_storageaccount
+ azure_rm_subnet:
+ redirect: azure.azcollection.azure_rm_subnet
+ azure_rm_virtualmachine:
+ redirect: azure.azcollection.azure_rm_virtualmachine
+ azure_rm_virtualnetwork:
+ redirect: azure.azcollection.azure_rm_virtualnetwork
+ azure_rm_aks:
+ redirect: azure.azcollection.azure_rm_aks
+ azure_rm_aks_info:
+ redirect: azure.azcollection.azure_rm_aks_info
+ azure_rm_aksversion_info:
+ redirect: azure.azcollection.azure_rm_aksversion_info
+ azure_rm_appgateway:
+ redirect: azure.azcollection.azure_rm_appgateway
+ azure_rm_applicationsecuritygroup:
+ redirect: azure.azcollection.azure_rm_applicationsecuritygroup
+ azure_rm_applicationsecuritygroup_info:
+ redirect: azure.azcollection.azure_rm_applicationsecuritygroup_info
+ azure_rm_appserviceplan:
+ redirect: azure.azcollection.azure_rm_appserviceplan
+ azure_rm_appserviceplan_info:
+ redirect: azure.azcollection.azure_rm_appserviceplan_info
+ azure_rm_availabilityset:
+ redirect: azure.azcollection.azure_rm_availabilityset
+ azure_rm_availabilityset_info:
+ redirect: azure.azcollection.azure_rm_availabilityset_info
+ azure_rm_containerinstance:
+ redirect: azure.azcollection.azure_rm_containerinstance
+ azure_rm_containerinstance_info:
+ redirect: azure.azcollection.azure_rm_containerinstance_info
+ azure_rm_containerregistry:
+ redirect: azure.azcollection.azure_rm_containerregistry
+ azure_rm_containerregistry_info:
+ redirect: azure.azcollection.azure_rm_containerregistry_info
+ azure_rm_deployment_info:
+ redirect: azure.azcollection.azure_rm_deployment_info
+ azure_rm_functionapp:
+ redirect: azure.azcollection.azure_rm_functionapp
+ azure_rm_functionapp_info:
+ redirect: azure.azcollection.azure_rm_functionapp_info
+ azure_rm_gallery:
+ redirect: azure.azcollection.azure_rm_gallery
+ azure_rm_gallery_info:
+ redirect: azure.azcollection.azure_rm_gallery_info
+ azure_rm_galleryimage:
+ redirect: azure.azcollection.azure_rm_galleryimage
+ azure_rm_galleryimage_info:
+ redirect: azure.azcollection.azure_rm_galleryimage_info
+ azure_rm_galleryimageversion:
+ redirect: azure.azcollection.azure_rm_galleryimageversion
+ azure_rm_galleryimageversion_info:
+ redirect: azure.azcollection.azure_rm_galleryimageversion_info
+ azure_rm_image:
+ redirect: azure.azcollection.azure_rm_image
+ azure_rm_image_info:
+ redirect: azure.azcollection.azure_rm_image_info
+ azure_rm_keyvault:
+ redirect: azure.azcollection.azure_rm_keyvault
+ azure_rm_keyvault_info:
+ redirect: azure.azcollection.azure_rm_keyvault_info
+ azure_rm_keyvaultkey:
+ redirect: azure.azcollection.azure_rm_keyvaultkey
+ azure_rm_keyvaultkey_info:
+ redirect: azure.azcollection.azure_rm_keyvaultkey_info
+ azure_rm_keyvaultsecret:
+ redirect: azure.azcollection.azure_rm_keyvaultsecret
+ azure_rm_manageddisk:
+ redirect: azure.azcollection.azure_rm_manageddisk
+ azure_rm_manageddisk_info:
+ redirect: azure.azcollection.azure_rm_manageddisk_info
+ azure_rm_resource:
+ redirect: azure.azcollection.azure_rm_resource
+ azure_rm_resource_info:
+ redirect: azure.azcollection.azure_rm_resource_info
+ azure_rm_resourcegroup:
+ redirect: azure.azcollection.azure_rm_resourcegroup
+ azure_rm_resourcegroup_info:
+ redirect: azure.azcollection.azure_rm_resourcegroup_info
+ azure_rm_snapshot:
+ redirect: azure.azcollection.azure_rm_snapshot
+ azure_rm_storageblob:
+ redirect: azure.azcollection.azure_rm_storageblob
+ azure_rm_subnet_info:
+ redirect: azure.azcollection.azure_rm_subnet_info
+ azure_rm_virtualmachineextension:
+ redirect: azure.azcollection.azure_rm_virtualmachineextension
+ azure_rm_virtualmachineextension_info:
+ redirect: azure.azcollection.azure_rm_virtualmachineextension_info
+ azure_rm_virtualmachineimage_info:
+ redirect: azure.azcollection.azure_rm_virtualmachineimage_info
+ azure_rm_virtualmachinescaleset:
+ redirect: azure.azcollection.azure_rm_virtualmachinescaleset
+ azure_rm_virtualmachinescaleset_info:
+ redirect: azure.azcollection.azure_rm_virtualmachinescaleset_info
+ azure_rm_virtualmachinescalesetextension:
+ redirect: azure.azcollection.azure_rm_virtualmachinescalesetextension
+ azure_rm_virtualmachinescalesetextension_info:
+ redirect: azure.azcollection.azure_rm_virtualmachinescalesetextension_info
+ azure_rm_virtualmachinescalesetinstance:
+ redirect: azure.azcollection.azure_rm_virtualmachinescalesetinstance
+ azure_rm_virtualmachinescalesetinstance_info:
+ redirect: azure.azcollection.azure_rm_virtualmachinescalesetinstance_info
+ azure_rm_webapp:
+ redirect: azure.azcollection.azure_rm_webapp
+ azure_rm_webapp_info:
+ redirect: azure.azcollection.azure_rm_webapp_info
+ azure_rm_webappslot:
+ redirect: azure.azcollection.azure_rm_webappslot
+ azure_rm_automationaccount:
+ redirect: azure.azcollection.azure_rm_automationaccount
+ azure_rm_automationaccount_info:
+ redirect: azure.azcollection.azure_rm_automationaccount_info
+ azure_rm_autoscale:
+ redirect: azure.azcollection.azure_rm_autoscale
+ azure_rm_autoscale_info:
+ redirect: azure.azcollection.azure_rm_autoscale_info
+ azure_rm_azurefirewall:
+ redirect: azure.azcollection.azure_rm_azurefirewall
+ azure_rm_azurefirewall_info:
+ redirect: azure.azcollection.azure_rm_azurefirewall_info
+ azure_rm_batchaccount:
+ redirect: azure.azcollection.azure_rm_batchaccount
+ azure_rm_cdnendpoint:
+ redirect: azure.azcollection.azure_rm_cdnendpoint
+ azure_rm_cdnendpoint_info:
+ redirect: azure.azcollection.azure_rm_cdnendpoint_info
+ azure_rm_cdnprofile:
+ redirect: azure.azcollection.azure_rm_cdnprofile
+ azure_rm_cdnprofile_info:
+ redirect: azure.azcollection.azure_rm_cdnprofile_info
+ azure_rm_iotdevice:
+ redirect: azure.azcollection.azure_rm_iotdevice
+ azure_rm_iotdevice_info:
+ redirect: azure.azcollection.azure_rm_iotdevice_info
+ azure_rm_iotdevicemodule:
+ redirect: azure.azcollection.azure_rm_iotdevicemodule
+ azure_rm_iothub:
+ redirect: azure.azcollection.azure_rm_iothub
+ azure_rm_iothub_info:
+ redirect: azure.azcollection.azure_rm_iothub_info
+ azure_rm_iothubconsumergroup:
+ redirect: azure.azcollection.azure_rm_iothubconsumergroup
+ azure_rm_loadbalancer:
+ redirect: azure.azcollection.azure_rm_loadbalancer
+ azure_rm_loadbalancer_info:
+ redirect: azure.azcollection.azure_rm_loadbalancer_info
+ azure_rm_lock:
+ redirect: azure.azcollection.azure_rm_lock
+ azure_rm_lock_info:
+ redirect: azure.azcollection.azure_rm_lock_info
+ azure_rm_loganalyticsworkspace:
+ redirect: azure.azcollection.azure_rm_loganalyticsworkspace
+ azure_rm_loganalyticsworkspace_info:
+ redirect: azure.azcollection.azure_rm_loganalyticsworkspace_info
+ azure_rm_monitorlogprofile:
+ redirect: azure.azcollection.azure_rm_monitorlogprofile
+ azure_rm_rediscache:
+ redirect: azure.azcollection.azure_rm_rediscache
+ azure_rm_rediscache_info:
+ redirect: azure.azcollection.azure_rm_rediscache_info
+ azure_rm_rediscachefirewallrule:
+ redirect: azure.azcollection.azure_rm_rediscachefirewallrule
+ azure_rm_roleassignment:
+ redirect: azure.azcollection.azure_rm_roleassignment
+ azure_rm_roleassignment_info:
+ redirect: azure.azcollection.azure_rm_roleassignment_info
+ azure_rm_roledefinition:
+ redirect: azure.azcollection.azure_rm_roledefinition
+ azure_rm_roledefinition_info:
+ redirect: azure.azcollection.azure_rm_roledefinition_info
+ azure_rm_route:
+ redirect: azure.azcollection.azure_rm_route
+ azure_rm_routetable:
+ redirect: azure.azcollection.azure_rm_routetable
+ azure_rm_routetable_info:
+ redirect: azure.azcollection.azure_rm_routetable_info
+ azure_rm_servicebus:
+ redirect: azure.azcollection.azure_rm_servicebus
+ azure_rm_servicebus_info:
+ redirect: azure.azcollection.azure_rm_servicebus_info
+ azure_rm_servicebusqueue:
+ redirect: azure.azcollection.azure_rm_servicebusqueue
+ azure_rm_servicebussaspolicy:
+ redirect: azure.azcollection.azure_rm_servicebussaspolicy
+ azure_rm_servicebustopic:
+ redirect: azure.azcollection.azure_rm_servicebustopic
+ azure_rm_servicebustopicsubscription:
+ redirect: azure.azcollection.azure_rm_servicebustopicsubscription
+ azure_rm_trafficmanagerendpoint:
+ redirect: azure.azcollection.azure_rm_trafficmanagerendpoint
+ azure_rm_trafficmanagerendpoint_info:
+ redirect: azure.azcollection.azure_rm_trafficmanagerendpoint_info
+ azure_rm_trafficmanagerprofile:
+ redirect: azure.azcollection.azure_rm_trafficmanagerprofile
+ azure_rm_trafficmanagerprofile_info:
+ redirect: azure.azcollection.azure_rm_trafficmanagerprofile_info
+ azure_rm_virtualnetworkgateway:
+ redirect: azure.azcollection.azure_rm_virtualnetworkgateway
+ azure_rm_virtualnetworkpeering:
+ redirect: azure.azcollection.azure_rm_virtualnetworkpeering
+ azure_rm_virtualnetworkpeering_info:
+ redirect: azure.azcollection.azure_rm_virtualnetworkpeering_info
+ azure_rm_cosmosdbaccount:
+ redirect: azure.azcollection.azure_rm_cosmosdbaccount
+ azure_rm_cosmosdbaccount_info:
+ redirect: azure.azcollection.azure_rm_cosmosdbaccount_info
+ azure_rm_devtestlab:
+ redirect: azure.azcollection.azure_rm_devtestlab
+ azure_rm_devtestlab_info:
+ redirect: azure.azcollection.azure_rm_devtestlab_info
+ azure_rm_devtestlabarmtemplate_info:
+ redirect: azure.azcollection.azure_rm_devtestlabarmtemplate_info
+ azure_rm_devtestlabartifact_info:
+ redirect: azure.azcollection.azure_rm_devtestlabartifact_info
+ azure_rm_devtestlabartifactsource:
+ redirect: azure.azcollection.azure_rm_devtestlabartifactsource
+ azure_rm_devtestlabartifactsource_info:
+ redirect: azure.azcollection.azure_rm_devtestlabartifactsource_info
+ azure_rm_devtestlabcustomimage:
+ redirect: azure.azcollection.azure_rm_devtestlabcustomimage
+ azure_rm_devtestlabcustomimage_info:
+ redirect: azure.azcollection.azure_rm_devtestlabcustomimage_info
+ azure_rm_devtestlabenvironment:
+ redirect: azure.azcollection.azure_rm_devtestlabenvironment
+ azure_rm_devtestlabenvironment_info:
+ redirect: azure.azcollection.azure_rm_devtestlabenvironment_info
+ azure_rm_devtestlabpolicy:
+ redirect: azure.azcollection.azure_rm_devtestlabpolicy
+ azure_rm_devtestlabpolicy_info:
+ redirect: azure.azcollection.azure_rm_devtestlabpolicy_info
+ azure_rm_devtestlabschedule:
+ redirect: azure.azcollection.azure_rm_devtestlabschedule
+ azure_rm_devtestlabschedule_info:
+ redirect: azure.azcollection.azure_rm_devtestlabschedule_info
+ azure_rm_devtestlabvirtualmachine:
+ redirect: azure.azcollection.azure_rm_devtestlabvirtualmachine
+ azure_rm_devtestlabvirtualmachine_info:
+ redirect: azure.azcollection.azure_rm_devtestlabvirtualmachine_info
+ azure_rm_devtestlabvirtualnetwork:
+ redirect: azure.azcollection.azure_rm_devtestlabvirtualnetwork
+ azure_rm_devtestlabvirtualnetwork_info:
+ redirect: azure.azcollection.azure_rm_devtestlabvirtualnetwork_info
+ azure_rm_hdinsightcluster:
+ redirect: azure.azcollection.azure_rm_hdinsightcluster
+ azure_rm_hdinsightcluster_info:
+ redirect: azure.azcollection.azure_rm_hdinsightcluster_info
+ azure_rm_mariadbconfiguration:
+ redirect: azure.azcollection.azure_rm_mariadbconfiguration
+ azure_rm_mariadbconfiguration_info:
+ redirect: azure.azcollection.azure_rm_mariadbconfiguration_info
+ azure_rm_mariadbdatabase:
+ redirect: azure.azcollection.azure_rm_mariadbdatabase
+ azure_rm_mariadbdatabase_info:
+ redirect: azure.azcollection.azure_rm_mariadbdatabase_info
+ azure_rm_mariadbfirewallrule:
+ redirect: azure.azcollection.azure_rm_mariadbfirewallrule
+ azure_rm_mariadbfirewallrule_info:
+ redirect: azure.azcollection.azure_rm_mariadbfirewallrule_info
+ azure_rm_mariadbserver:
+ redirect: azure.azcollection.azure_rm_mariadbserver
+ azure_rm_mariadbserver_info:
+ redirect: azure.azcollection.azure_rm_mariadbserver_info
+ azure_rm_mysqlconfiguration:
+ redirect: azure.azcollection.azure_rm_mysqlconfiguration
+ azure_rm_mysqlconfiguration_info:
+ redirect: azure.azcollection.azure_rm_mysqlconfiguration_info
+ azure_rm_mysqldatabase:
+ redirect: azure.azcollection.azure_rm_mysqldatabase
+ azure_rm_mysqldatabase_info:
+ redirect: azure.azcollection.azure_rm_mysqldatabase_info
+ azure_rm_mysqlfirewallrule:
+ redirect: azure.azcollection.azure_rm_mysqlfirewallrule
+ azure_rm_mysqlfirewallrule_info:
+ redirect: azure.azcollection.azure_rm_mysqlfirewallrule_info
+ azure_rm_mysqlserver:
+ redirect: azure.azcollection.azure_rm_mysqlserver
+ azure_rm_mysqlserver_info:
+ redirect: azure.azcollection.azure_rm_mysqlserver_info
+ azure_rm_postgresqlconfiguration:
+ redirect: azure.azcollection.azure_rm_postgresqlconfiguration
+ azure_rm_postgresqlconfiguration_info:
+ redirect: azure.azcollection.azure_rm_postgresqlconfiguration_info
+ azure_rm_postgresqldatabase:
+ redirect: azure.azcollection.azure_rm_postgresqldatabase
+ azure_rm_postgresqldatabase_info:
+ redirect: azure.azcollection.azure_rm_postgresqldatabase_info
+ azure_rm_postgresqlfirewallrule:
+ redirect: azure.azcollection.azure_rm_postgresqlfirewallrule
+ azure_rm_postgresqlfirewallrule_info:
+ redirect: azure.azcollection.azure_rm_postgresqlfirewallrule_info
+ azure_rm_postgresqlserver:
+ redirect: azure.azcollection.azure_rm_postgresqlserver
+ azure_rm_postgresqlserver_info:
+ redirect: azure.azcollection.azure_rm_postgresqlserver_info
+ azure_rm_sqldatabase:
+ redirect: azure.azcollection.azure_rm_sqldatabase
+ azure_rm_sqldatabase_info:
+ redirect: azure.azcollection.azure_rm_sqldatabase_info
+ azure_rm_sqlfirewallrule:
+ redirect: azure.azcollection.azure_rm_sqlfirewallrule
+ azure_rm_sqlfirewallrule_info:
+ redirect: azure.azcollection.azure_rm_sqlfirewallrule_info
+ azure_rm_sqlserver:
+ redirect: azure.azcollection.azure_rm_sqlserver
+ azure_rm_sqlserver_info:
+ redirect: azure.azcollection.azure_rm_sqlserver_info
+ openvswitch_port:
+ redirect: openvswitch.openvswitch.openvswitch_port
+ openvswitch_db:
+ redirect: openvswitch.openvswitch.openvswitch_db
+ openvswitch_bridge:
+ redirect: openvswitch.openvswitch.openvswitch_bridge
+ vyos_ospfv2:
+ redirect: vyos.vyos.vyos_ospfv2
+ vyos_l3_interface:
+ redirect: vyos.vyos.vyos_l3_interface
+ vyos_banner:
+ redirect: vyos.vyos.vyos_banner
+ vyos_firewall_rules:
+ redirect: vyos.vyos.vyos_firewall_rules
+ vyos_static_route:
+ redirect: vyos.vyos.vyos_static_route
+ vyos_lldp_interface:
+ redirect: vyos.vyos.vyos_lldp_interface
+ vyos_vlan:
+ redirect: vyos.vyos.vyos_vlan
+ vyos_user:
+ redirect: vyos.vyos.vyos_user
+ vyos_firewall_interfaces:
+ redirect: vyos.vyos.vyos_firewall_interfaces
+ vyos_interface:
+ redirect: vyos.vyos.vyos_interface
+ vyos_firewall_global:
+ redirect: vyos.vyos.vyos_firewall_global
+ vyos_config:
+ redirect: vyos.vyos.vyos_config
+ vyos_facts:
+ redirect: vyos.vyos.vyos_facts
+ vyos_linkagg:
+ redirect: vyos.vyos.vyos_linkagg
+ vyos_ping:
+ redirect: vyos.vyos.vyos_ping
+ vyos_lag_interfaces:
+ redirect: vyos.vyos.vyos_lag_interfaces
+ vyos_lldp:
+ redirect: vyos.vyos.vyos_lldp
+ vyos_lldp_global:
+ redirect: vyos.vyos.vyos_lldp_global
+ vyos_l3_interfaces:
+ redirect: vyos.vyos.vyos_l3_interfaces
+ vyos_lldp_interfaces:
+ redirect: vyos.vyos.vyos_lldp_interfaces
+ vyos_interfaces:
+ redirect: vyos.vyos.vyos_interfaces
+ vyos_logging:
+ redirect: vyos.vyos.vyos_logging
+ vyos_static_routes:
+ redirect: vyos.vyos.vyos_static_routes
+ vyos_command:
+ redirect: vyos.vyos.vyos_command
+ vyos_system:
+ redirect: vyos.vyos.vyos_system
+ cpm_plugconfig:
+ redirect: wti.remote.cpm_plugconfig
+ cpm_plugcontrol:
+ redirect: wti.remote.cpm_plugcontrol
+ cpm_serial_port_config:
+ redirect: wti.remote.cpm_serial_port_config
+ cpm_serial_port_info:
+ redirect: wti.remote.cpm_serial_port_info
+ cpm_user:
+ redirect: wti.remote.cpm_user
+ module_utils:
+ # test entries
+ formerly_core:
+ redirect: ansible_collections.testns.testcoll.plugins.module_utils.base
+ sub1.sub2.formerly_core:
+ redirect: ansible_collections.testns.testcoll.plugins.module_utils.base
+ # real
+ acme:
+ redirect: community.crypto.acme
+ alicloud_ecs:
+ redirect: community.general.alicloud_ecs
+ ansible_tower:
+ redirect: awx.awx.ansible_tower
+ aws.batch:
+ redirect: amazon.aws.batch
+ aws.cloudfront_facts:
+ redirect: amazon.aws.cloudfront_facts
+ aws.core:
+ redirect: amazon.aws.core
+ aws.direct_connect:
+ redirect: amazon.aws.direct_connect
+ aws.elb_utils:
+ redirect: amazon.aws.elb_utils
+ aws.elbv2:
+ redirect: amazon.aws.elbv2
+ aws.iam:
+ redirect: amazon.aws.iam
+ aws.rds:
+ redirect: amazon.aws.rds
+ aws.s3:
+ redirect: amazon.aws.s3
+ aws.urls:
+ redirect: amazon.aws.urls
+ aws.waf:
+ redirect: amazon.aws.waf
+ aws.waiters:
+ redirect: amazon.aws.waiters
+ azure_rm_common:
+ redirect: azure.azcollection.azure_rm_common
+ azure_rm_common_ext:
+ redirect: azure.azcollection.azure_rm_common_ext
+ azure_rm_common_rest:
+ redirect: azure.azcollection.azure_rm_common_rest
+ cloud:
+ redirect: community.general.cloud
+ cloudscale:
+ redirect: cloudscale_ch.cloud.api
+ cloudstack:
+ redirect: ngine_io.cloudstack.cloudstack
+ compat.ipaddress:
+ redirect: ansible.netcommon.compat.ipaddress
+ crypto:
+ redirect: community.crypto.crypto
+ database:
+ redirect: community.general.database
+ digital_ocean:
+ redirect: community.digitalocean.digital_ocean
+ dimensiondata:
+ redirect: community.general.dimensiondata
+ docker:
+ redirect: community.docker.common
+ docker.common:
+ redirect: community.docker.common
+ docker.swarm:
+ redirect: community.docker.swarm
+ ec2:
+ redirect: amazon.aws.ec2
+ ecs:
+ redirect: community.crypto.ecs
+ ecs.api:
+ redirect: community.crypto.ecs.api
+ exoscale:
+ redirect: ngine_io.exoscale.exoscale
+ f5_utils:
+ tombstone:
+ removal_date: 2019-11-06
+ firewalld:
+ redirect: ansible.posix.firewalld
+ gcdns:
+ redirect: community.google.gcdns
+ gce:
+ redirect: community.google.gce
+ gcp:
+ redirect: community.google.gcp
+ gcp_utils:
+ redirect: google.cloud.gcp_utils
+ gitlab:
+ redirect: community.general.gitlab
+ hcloud:
+ redirect: hetzner.hcloud.hcloud
+ heroku:
+ redirect: community.general.heroku
+ hetzner:
+ redirect: community.hrobot.robot
+ hwc_utils:
+ redirect: community.general.hwc_utils
+ ibm_sa_utils:
+ redirect: community.general.ibm_sa_utils
+ identity:
+ redirect: community.general.identity
+ identity.keycloak:
+ redirect: community.general.identity.keycloak
+ identity.keycloak.keycloak:
+ redirect: community.general.identity.keycloak.keycloak
+ infinibox:
+ redirect: infinidat.infinibox.infinibox
+ influxdb:
+ redirect: community.general.influxdb
+ ipa:
+ redirect: community.general.ipa
+ ismount:
+ redirect: ansible.posix.mount
+ k8s.common:
+ redirect: kubernetes.core.common
+ k8s.raw:
+ redirect: kubernetes.core.raw
+ k8s.scale:
+ redirect: kubernetes.core.scale
+ known_hosts:
+ redirect: community.general.known_hosts
+ kubevirt:
+ redirect: community.kubevirt.kubevirt
+ ldap:
+ redirect: community.general.ldap
+ linode:
+ redirect: community.general.linode
+ lxd:
+ redirect: community.general.lxd
+ manageiq:
+ redirect: community.general.manageiq
+ memset:
+ redirect: community.general.memset
+ mysql:
+ redirect: community.mysql.mysql
+ net_tools.netbox.netbox_utils:
+ redirect: netbox.netbox.netbox_utils
+ net_tools.nios:
+ redirect: community.general.net_tools.nios
+ net_tools.nios.api:
+ redirect: community.general.net_tools.nios.api
+ netapp:
+ redirect: netapp.ontap.netapp
+ netapp_elementsw_module:
+ redirect: netapp.ontap.netapp_elementsw_module
+ netapp_module:
+ redirect: netapp.ontap.netapp_module
+ network.a10.a10:
+ redirect: community.network.network.a10.a10
+ network.aci.aci:
+ redirect: cisco.aci.aci
+ network.aci.mso:
+ redirect: cisco.mso.mso
+ network.aireos.aireos:
+ redirect: community.network.network.aireos.aireos
+ network.aos.aos:
+ redirect: community.network.network.aos.aos
+ network.aruba.aruba:
+ redirect: community.network.network.aruba.aruba
+ network.asa.asa:
+ redirect: cisco.asa.network.asa.asa
+ network.avi.ansible_utils:
+ redirect: community.network.network.avi.ansible_utils
+ network.avi.avi:
+ redirect: community.network.network.avi.avi
+ network.avi.avi_api:
+ redirect: community.network.network.avi.avi_api
+ network.bigswitch.bigswitch:
+ redirect: community.network.network.bigswitch.bigswitch
+ network.checkpoint.checkpoint:
+ redirect: check_point.mgmt.checkpoint
+ network.cloudengine.ce:
+ redirect: community.network.network.cloudengine.ce
+ network.cnos.cnos:
+ redirect: community.network.network.cnos.cnos
+ network.cnos.cnos_devicerules:
+ redirect: community.network.network.cnos.cnos_devicerules
+ network.cnos.cnos_errorcodes:
+ redirect: community.network.network.cnos.cnos_errorcodes
+ network.common.cfg.base:
+ redirect: ansible.netcommon.network.common.cfg.base
+ network.common.config:
+ redirect: ansible.netcommon.network.common.config
+ network.common.facts.facts:
+ redirect: ansible.netcommon.network.common.facts.facts
+ network.common.netconf:
+ redirect: ansible.netcommon.network.common.netconf
+ network.common.network:
+ redirect: ansible.netcommon.network.common.network
+ network.common.parsing:
+ redirect: ansible.netcommon.network.common.parsing
+ network.common.utils:
+ redirect: ansible.netcommon.network.common.utils
+ network.dellos10.dellos10:
+ redirect: dellemc.os10.network.os10
+ network.dellos9.dellos9:
+ redirect: dellemc.os9.network.os9
+ network.dellos6.dellos6:
+ redirect: dellemc.os6.network.os6
+ network.edgeos.edgeos:
+ redirect: community.network.network.edgeos.edgeos
+ network.edgeswitch.edgeswitch:
+ redirect: community.network.network.edgeswitch.edgeswitch
+ network.edgeswitch.edgeswitch_interface:
+ redirect: community.network.network.edgeswitch.edgeswitch_interface
+ network.enos.enos:
+ redirect: community.network.network.enos.enos
+ network.eos.argspec.facts:
+ redirect: arista.eos.network.eos.argspec.facts
+ network.eos.argspec.facts.facts:
+ redirect: arista.eos.network.eos.argspec.facts.facts
+ network.eos.argspec.interfaces:
+ redirect: arista.eos.network.eos.argspec.interfaces
+ network.eos.argspec.interfaces.interfaces:
+ redirect: arista.eos.network.eos.argspec.interfaces.interfaces
+ network.eos.argspec.l2_interfaces:
+ redirect: arista.eos.network.eos.argspec.l2_interfaces
+ network.eos.argspec.l2_interfaces.l2_interfaces:
+ redirect: arista.eos.network.eos.argspec.l2_interfaces.l2_interfaces
+ network.eos.argspec.l3_interfaces:
+ redirect: arista.eos.network.eos.argspec.l3_interfaces
+ network.eos.argspec.l3_interfaces.l3_interfaces:
+ redirect: arista.eos.network.eos.argspec.l3_interfaces.l3_interfaces
+ network.eos.argspec.lacp:
+ redirect: arista.eos.network.eos.argspec.lacp
+ network.eos.argspec.lacp.lacp:
+ redirect: arista.eos.network.eos.argspec.lacp.lacp
+ network.eos.argspec.lacp_interfaces:
+ redirect: arista.eos.network.eos.argspec.lacp_interfaces
+ network.eos.argspec.lacp_interfaces.lacp_interfaces:
+ redirect: arista.eos.network.eos.argspec.lacp_interfaces.lacp_interfaces
+ network.eos.argspec.lag_interfaces:
+ redirect: arista.eos.network.eos.argspec.lag_interfaces
+ network.eos.argspec.lag_interfaces.lag_interfaces:
+ redirect: arista.eos.network.eos.argspec.lag_interfaces.lag_interfaces
+ network.eos.argspec.lldp_global:
+ redirect: arista.eos.network.eos.argspec.lldp_global
+ network.eos.argspec.lldp_global.lldp_global:
+ redirect: arista.eos.network.eos.argspec.lldp_global.lldp_global
+ network.eos.argspec.lldp_interfaces:
+ redirect: arista.eos.network.eos.argspec.lldp_interfaces
+ network.eos.argspec.lldp_interfaces.lldp_interfaces:
+ redirect: arista.eos.network.eos.argspec.lldp_interfaces.lldp_interfaces
+ network.eos.argspec.vlans:
+ redirect: arista.eos.network.eos.argspec.vlans
+ network.eos.argspec.vlans.vlans:
+ redirect: arista.eos.network.eos.argspec.vlans.vlans
+ network.eos.config:
+ redirect: arista.eos.network.eos.config
+ network.eos.config.interfaces:
+ redirect: arista.eos.network.eos.config.interfaces
+ network.eos.config.interfaces.interfaces:
+ redirect: arista.eos.network.eos.config.interfaces.interfaces
+ network.eos.config.l2_interfaces:
+ redirect: arista.eos.network.eos.config.l2_interfaces
+ network.eos.config.l2_interfaces.l2_interfaces:
+ redirect: arista.eos.network.eos.config.l2_interfaces.l2_interfaces
+ network.eos.config.l3_interfaces:
+ redirect: arista.eos.network.eos.config.l3_interfaces
+ network.eos.config.l3_interfaces.l3_interfaces:
+ redirect: arista.eos.network.eos.config.l3_interfaces.l3_interfaces
+ network.eos.config.lacp:
+ redirect: arista.eos.network.eos.config.lacp
+ network.eos.config.lacp.lacp:
+ redirect: arista.eos.network.eos.config.lacp.lacp
+ network.eos.config.lacp_interfaces:
+ redirect: arista.eos.network.eos.config.lacp_interfaces
+ network.eos.config.lacp_interfaces.lacp_interfaces:
+ redirect: arista.eos.network.eos.config.lacp_interfaces.lacp_interfaces
+ network.eos.config.lag_interfaces:
+ redirect: arista.eos.network.eos.config.lag_interfaces
+ network.eos.config.lag_interfaces.lag_interfaces:
+ redirect: arista.eos.network.eos.config.lag_interfaces.lag_interfaces
+ network.eos.config.lldp_global:
+ redirect: arista.eos.network.eos.config.lldp_global
+ network.eos.config.lldp_global.lldp_global:
+ redirect: arista.eos.network.eos.config.lldp_global.lldp_global
+ network.eos.config.lldp_interfaces:
+ redirect: arista.eos.network.eos.config.lldp_interfaces
+ network.eos.config.lldp_interfaces.lldp_interfaces:
+ redirect: arista.eos.network.eos.config.lldp_interfaces.lldp_interfaces
+ network.eos.config.vlans:
+ redirect: arista.eos.network.eos.config.vlans
+ network.eos.config.vlans.vlans:
+ redirect: arista.eos.network.eos.config.vlans.vlans
+ network.eos.eos:
+ redirect: arista.eos.network.eos.eos
+ network.eos.facts:
+ redirect: arista.eos.network.eos.facts
+ network.eos.facts.facts:
+ redirect: arista.eos.network.eos.facts.facts
+ network.eos.facts.interfaces:
+ redirect: arista.eos.network.eos.facts.interfaces
+ network.eos.facts.interfaces.interfaces:
+ redirect: arista.eos.network.eos.facts.interfaces.interfaces
+ network.eos.facts.l2_interfaces:
+ redirect: arista.eos.network.eos.facts.l2_interfaces
+ network.eos.facts.l2_interfaces.l2_interfaces:
+ redirect: arista.eos.network.eos.facts.l2_interfaces.l2_interfaces
+ network.eos.facts.l3_interfaces:
+ redirect: arista.eos.network.eos.facts.l3_interfaces
+ network.eos.facts.l3_interfaces.l3_interfaces:
+ redirect: arista.eos.network.eos.facts.l3_interfaces.l3_interfaces
+ network.eos.facts.lacp:
+ redirect: arista.eos.network.eos.facts.lacp
+ network.eos.facts.lacp.lacp:
+ redirect: arista.eos.network.eos.facts.lacp.lacp
+ network.eos.facts.lacp_interfaces:
+ redirect: arista.eos.network.eos.facts.lacp_interfaces
+ network.eos.facts.lacp_interfaces.lacp_interfaces:
+ redirect: arista.eos.network.eos.facts.lacp_interfaces.lacp_interfaces
+ network.eos.facts.lag_interfaces:
+ redirect: arista.eos.network.eos.facts.lag_interfaces
+ network.eos.facts.lag_interfaces.lag_interfaces:
+ redirect: arista.eos.network.eos.facts.lag_interfaces.lag_interfaces
+ network.eos.facts.legacy:
+ redirect: arista.eos.network.eos.facts.legacy
+ network.eos.facts.legacy.base:
+ redirect: arista.eos.network.eos.facts.legacy.base
+ network.eos.facts.lldp_global:
+ redirect: arista.eos.network.eos.facts.lldp_global
+ network.eos.facts.lldp_global.lldp_global:
+ redirect: arista.eos.network.eos.facts.lldp_global.lldp_global
+ network.eos.facts.lldp_interfaces:
+ redirect: arista.eos.network.eos.facts.lldp_interfaces
+ network.eos.facts.lldp_interfaces.lldp_interfaces:
+ redirect: arista.eos.network.eos.facts.lldp_interfaces.lldp_interfaces
+ network.eos.facts.vlans:
+ redirect: arista.eos.network.eos.facts.vlans
+ network.eos.facts.vlans.vlans:
+ redirect: arista.eos.network.eos.facts.vlans.vlans
+ network.eos.providers:
+ redirect: arista.eos.network.eos.providers
+ network.eos.providers.cli:
+ redirect: arista.eos.network.eos.providers.cli
+ network.eos.providers.cli.config:
+ redirect: arista.eos.network.eos.providers.cli.config
+ network.eos.providers.cli.config.bgp:
+ redirect: arista.eos.network.eos.providers.cli.config.bgp
+ network.eos.providers.cli.config.bgp.address_family:
+ redirect: arista.eos.network.eos.providers.cli.config.bgp.address_family
+ network.eos.providers.cli.config.bgp.neighbors:
+ redirect: arista.eos.network.eos.providers.cli.config.bgp.neighbors
+ network.eos.providers.cli.config.bgp.process:
+ redirect: arista.eos.network.eos.providers.cli.config.bgp.process
+ network.eos.providers.module:
+ redirect: arista.eos.network.eos.providers.module
+ network.eos.providers.providers:
+ redirect: arista.eos.network.eos.providers.providers
+ network.eos.utils:
+ redirect: arista.eos.network.eos.utils
+ network.eos.utils.utils:
+ redirect: arista.eos.network.eos.utils.utils
+ network.eric_eccli.eric_eccli:
+ redirect: community.network.network.eric_eccli.eric_eccli
+ network.exos.argspec.facts.facts:
+ redirect: community.network.network.exos.argspec.facts.facts
+ network.exos.argspec.lldp_global:
+ redirect: community.network.network.exos.argspec.lldp_global
+ network.exos.argspec.lldp_global.lldp_global:
+ redirect: community.network.network.exos.argspec.lldp_global.lldp_global
+ network.exos.config.lldp_global:
+ redirect: community.network.network.exos.config.lldp_global
+ network.exos.config.lldp_global.lldp_global:
+ redirect: community.network.network.exos.config.lldp_global.lldp_global
+ network.exos.exos:
+ redirect: community.network.network.exos.exos
+ network.exos.facts.facts:
+ redirect: community.network.network.exos.facts.facts
+ network.exos.facts.legacy:
+ redirect: community.network.network.exos.facts.legacy
+ network.exos.facts.legacy.base:
+ redirect: community.network.network.exos.facts.legacy.base
+ network.exos.facts.lldp_global:
+ redirect: community.network.network.exos.facts.lldp_global
+ network.exos.facts.lldp_global.lldp_global:
+ redirect: community.network.network.exos.facts.lldp_global.lldp_global
+ network.exos.utils.utils:
+ redirect: community.network.network.exos.utils.utils
+ network.f5.bigip:
+ redirect: f5networks.f5_modules.bigip
+ network.f5.bigiq:
+ redirect: f5networks.f5_modules.bigiq
+ network.f5.common:
+ redirect: f5networks.f5_modules.common
+ network.f5.compare:
+ redirect: f5networks.f5_modules.compare
+ network.f5.icontrol:
+ redirect: f5networks.f5_modules.icontrol
+ network.f5.ipaddress:
+ redirect: f5networks.f5_modules.ipaddress
+ # FIXME: missing
+ #network.f5.iworkflow:
+ # redirect: f5networks.f5_modules.iworkflow
+ #network.f5.legacy:
+ # redirect: f5networks.f5_modules.legacy
+ network.f5.urls:
+ redirect: f5networks.f5_modules.urls
+ network.fortianalyzer.common:
+ redirect: community.fortios.fortianalyzer.common
+ network.fortianalyzer.fortianalyzer:
+ redirect: community.fortios.fortianalyzer.fortianalyzer
+ network.fortimanager.common:
+ redirect: fortinet.fortimanager.common
+ network.fortimanager.fortimanager:
+ redirect: fortinet.fortimanager.fortimanager
+ network.fortios.argspec:
+ redirect: fortinet.fortios.fortios.argspec
+ network.fortios.argspec.facts:
+ redirect: fortinet.fortios.fortios.argspec.facts
+ network.fortios.argspec.facts.facts:
+ redirect: fortinet.fortios.fortios.argspec.facts.facts
+ network.fortios.argspec.system:
+ redirect: fortinet.fortios.fortios.argspec.system
+ network.fortios.argspec.system.system:
+ redirect: fortinet.fortios.fortios.argspec.system.system
+ network.fortios.facts:
+ redirect: fortinet.fortios.fortios.facts
+ network.fortios.facts.facts:
+ redirect: fortinet.fortios.fortios.facts.facts
+ network.fortios.facts.system:
+ redirect: fortinet.fortios.fortios.facts.system
+ network.fortios.facts.system.system:
+ redirect: fortinet.fortios.fortios.facts.system.system
+ network.fortios.fortios:
+ redirect: fortinet.fortios.fortios.fortios
+ network.frr:
+ redirect: frr.frr.network.frr
+ network.frr.frr:
+ redirect: frr.frr.network.frr.frr
+ network.frr.providers:
+ redirect: frr.frr.network.frr.providers
+ network.frr.providers.cli:
+ redirect: frr.frr.network.frr.providers.cli
+ network.frr.providers.cli.config:
+ redirect: frr.frr.network.frr.providers.cli.config
+ network.frr.providers.cli.config.base:
+ redirect: frr.frr.network.frr.providers.cli.config.base
+ network.frr.providers.cli.config.bgp:
+ redirect: frr.frr.network.frr.providers.cli.config.bgp
+ network.frr.providers.cli.config.bgp.address_family:
+ redirect: frr.frr.network.frr.providers.cli.config.bgp.address_family
+ network.frr.providers.cli.config.bgp.neighbors:
+ redirect: frr.frr.network.frr.providers.cli.config.bgp.neighbors
+ network.frr.providers.cli.config.bgp.process:
+ redirect: frr.frr.network.frr.providers.cli.config.bgp.process
+ network.frr.providers.module:
+ redirect: frr.frr.network.frr.providers.module
+ network.frr.providers.providers:
+ redirect: frr.frr.network.frr.providers.providers
+ network.ftd:
+ redirect: community.network.network.ftd
+ network.ftd.common:
+ redirect: community.network.network.ftd.common
+ network.ftd.configuration:
+ redirect: community.network.network.ftd.configuration
+ network.ftd.device:
+ redirect: community.network.network.ftd.device
+ network.ftd.fdm_swagger_client:
+ redirect: community.network.network.ftd.fdm_swagger_client
+ network.ftd.operation:
+ redirect: community.network.network.ftd.operation
+ network.icx:
+ redirect: community.network.network.icx
+ network.icx.icx:
+ redirect: community.network.network.icx.icx
+ network.ingate:
+ redirect: community.network.network.ingate
+ network.ingate.common:
+ redirect: community.network.network.ingate.common
+ network.ios:
+ redirect: cisco.ios.network.ios
+ network.ios.argspec:
+ redirect: cisco.ios.network.ios.argspec
+ network.ios.argspec.facts:
+ redirect: cisco.ios.network.ios.argspec.facts
+ network.ios.argspec.facts.facts:
+ redirect: cisco.ios.network.ios.argspec.facts.facts
+ network.ios.argspec.interfaces:
+ redirect: cisco.ios.network.ios.argspec.interfaces
+ network.ios.argspec.interfaces.interfaces:
+ redirect: cisco.ios.network.ios.argspec.interfaces.interfaces
+ network.ios.argspec.l2_interfaces:
+ redirect: cisco.ios.network.ios.argspec.l2_interfaces
+ network.ios.argspec.l2_interfaces.l2_interfaces:
+ redirect: cisco.ios.network.ios.argspec.l2_interfaces.l2_interfaces
+ network.ios.argspec.l3_interfaces:
+ redirect: cisco.ios.network.ios.argspec.l3_interfaces
+ network.ios.argspec.l3_interfaces.l3_interfaces:
+ redirect: cisco.ios.network.ios.argspec.l3_interfaces.l3_interfaces
+ network.ios.argspec.lacp:
+ redirect: cisco.ios.network.ios.argspec.lacp
+ network.ios.argspec.lacp.lacp:
+ redirect: cisco.ios.network.ios.argspec.lacp.lacp
+ network.ios.argspec.lacp_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lacp_interfaces
+ network.ios.argspec.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lacp_interfaces.lacp_interfaces
+ network.ios.argspec.lag_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lag_interfaces
+ network.ios.argspec.lag_interfaces.lag_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lag_interfaces.lag_interfaces
+ network.ios.argspec.lldp_global:
+ redirect: cisco.ios.network.ios.argspec.lldp_global
+ network.ios.argspec.lldp_global.lldp_global:
+ redirect: cisco.ios.network.ios.argspec.lldp_global.lldp_global
+ network.ios.argspec.lldp_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lldp_interfaces
+ network.ios.argspec.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lldp_interfaces.lldp_interfaces
+ network.ios.argspec.vlans:
+ redirect: cisco.ios.network.ios.argspec.vlans
+ network.ios.argspec.vlans.vlans:
+ redirect: cisco.ios.network.ios.argspec.vlans.vlans
+ network.ios.config:
+ redirect: cisco.ios.network.ios.config
+ network.ios.config.interfaces:
+ redirect: cisco.ios.network.ios.config.interfaces
+ network.ios.config.interfaces.interfaces:
+ redirect: cisco.ios.network.ios.config.interfaces.interfaces
+ network.ios.config.l2_interfaces:
+ redirect: cisco.ios.network.ios.config.l2_interfaces
+ network.ios.config.l2_interfaces.l2_interfaces:
+ redirect: cisco.ios.network.ios.config.l2_interfaces.l2_interfaces
+ network.ios.config.l3_interfaces:
+ redirect: cisco.ios.network.ios.config.l3_interfaces
+ network.ios.config.l3_interfaces.l3_interfaces:
+ redirect: cisco.ios.network.ios.config.l3_interfaces.l3_interfaces
+ network.ios.config.lacp:
+ redirect: cisco.ios.network.ios.config.lacp
+ network.ios.config.lacp.lacp:
+ redirect: cisco.ios.network.ios.config.lacp.lacp
+ network.ios.config.lacp_interfaces:
+ redirect: cisco.ios.network.ios.config.lacp_interfaces
+ network.ios.config.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.ios.network.ios.config.lacp_interfaces.lacp_interfaces
+ network.ios.config.lag_interfaces:
+ redirect: cisco.ios.network.ios.config.lag_interfaces
+ network.ios.config.lag_interfaces.lag_interfaces:
+ redirect: cisco.ios.network.ios.config.lag_interfaces.lag_interfaces
+ network.ios.config.lldp_global:
+ redirect: cisco.ios.network.ios.config.lldp_global
+ network.ios.config.lldp_global.lldp_global:
+ redirect: cisco.ios.network.ios.config.lldp_global.lldp_global
+ network.ios.config.lldp_interfaces:
+ redirect: cisco.ios.network.ios.config.lldp_interfaces
+ network.ios.config.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.ios.network.ios.config.lldp_interfaces.lldp_interfaces
+ network.ios.config.vlans:
+ redirect: cisco.ios.network.ios.config.vlans
+ network.ios.config.vlans.vlans:
+ redirect: cisco.ios.network.ios.config.vlans.vlans
+ network.ios.facts:
+ redirect: cisco.ios.network.ios.facts
+ network.ios.facts.facts:
+ redirect: cisco.ios.network.ios.facts.facts
+ network.ios.facts.interfaces:
+ redirect: cisco.ios.network.ios.facts.interfaces
+ network.ios.facts.interfaces.interfaces:
+ redirect: cisco.ios.network.ios.facts.interfaces.interfaces
+ network.ios.facts.l2_interfaces:
+ redirect: cisco.ios.network.ios.facts.l2_interfaces
+ network.ios.facts.l2_interfaces.l2_interfaces:
+ redirect: cisco.ios.network.ios.facts.l2_interfaces.l2_interfaces
+ network.ios.facts.l3_interfaces:
+ redirect: cisco.ios.network.ios.facts.l3_interfaces
+ network.ios.facts.l3_interfaces.l3_interfaces:
+ redirect: cisco.ios.network.ios.facts.l3_interfaces.l3_interfaces
+ network.ios.facts.lacp:
+ redirect: cisco.ios.network.ios.facts.lacp
+ network.ios.facts.lacp.lacp:
+ redirect: cisco.ios.network.ios.facts.lacp.lacp
+ network.ios.facts.lacp_interfaces:
+ redirect: cisco.ios.network.ios.facts.lacp_interfaces
+ network.ios.facts.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.ios.network.ios.facts.lacp_interfaces.lacp_interfaces
+ network.ios.facts.lag_interfaces:
+ redirect: cisco.ios.network.ios.facts.lag_interfaces
+ network.ios.facts.lag_interfaces.lag_interfaces:
+ redirect: cisco.ios.network.ios.facts.lag_interfaces.lag_interfaces
+ network.ios.facts.legacy:
+ redirect: cisco.ios.network.ios.facts.legacy
+ network.ios.facts.legacy.base:
+ redirect: cisco.ios.network.ios.facts.legacy.base
+ network.ios.facts.lldp_global:
+ redirect: cisco.ios.network.ios.facts.lldp_global
+ network.ios.facts.lldp_global.lldp_global:
+ redirect: cisco.ios.network.ios.facts.lldp_global.lldp_global
+ network.ios.facts.lldp_interfaces:
+ redirect: cisco.ios.network.ios.facts.lldp_interfaces
+ network.ios.facts.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.ios.network.ios.facts.lldp_interfaces.lldp_interfaces
+ network.ios.facts.vlans:
+ redirect: cisco.ios.network.ios.facts.vlans
+ network.ios.facts.vlans.vlans:
+ redirect: cisco.ios.network.ios.facts.vlans.vlans
+ network.ios.ios:
+ redirect: cisco.ios.network.ios.ios
+ network.ios.providers:
+ redirect: cisco.ios.network.ios.providers
+ network.ios.providers.cli:
+ redirect: cisco.ios.network.ios.providers.cli
+ network.ios.providers.cli.config:
+ redirect: cisco.ios.network.ios.providers.cli.config
+ network.ios.providers.cli.config.base:
+ redirect: cisco.ios.network.ios.providers.cli.config.base
+ network.ios.providers.cli.config.bgp:
+ redirect: cisco.ios.network.ios.providers.cli.config.bgp
+ network.ios.providers.cli.config.bgp.address_family:
+ redirect: cisco.ios.network.ios.providers.cli.config.bgp.address_family
+ network.ios.providers.cli.config.bgp.neighbors:
+ redirect: cisco.ios.network.ios.providers.cli.config.bgp.neighbors
+ network.ios.providers.cli.config.bgp.process:
+ redirect: cisco.ios.network.ios.providers.cli.config.bgp.process
+ network.ios.providers.module:
+ redirect: cisco.ios.network.ios.providers.module
+ network.ios.providers.providers:
+ redirect: cisco.ios.network.ios.providers.providers
+ network.ios.utils:
+ redirect: cisco.ios.network.ios.utils
+ network.ios.utils.utils:
+ redirect: cisco.ios.network.ios.utils.utils
+ network.iosxr:
+ redirect: cisco.iosxr.network.iosxr
+ network.iosxr.argspec:
+ redirect: cisco.iosxr.network.iosxr.argspec
+ network.iosxr.argspec.facts:
+ redirect: cisco.iosxr.network.iosxr.argspec.facts
+ network.iosxr.argspec.facts.facts:
+ redirect: cisco.iosxr.network.iosxr.argspec.facts.facts
+ network.iosxr.argspec.interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.interfaces
+ network.iosxr.argspec.interfaces.interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.interfaces.interfaces
+ network.iosxr.argspec.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.l2_interfaces
+ network.iosxr.argspec.l2_interfaces.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.l2_interfaces.l2_interfaces
+ network.iosxr.argspec.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.l3_interfaces
+ network.iosxr.argspec.l3_interfaces.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.l3_interfaces.l3_interfaces
+ network.iosxr.argspec.lacp:
+ redirect: cisco.iosxr.network.iosxr.argspec.lacp
+ network.iosxr.argspec.lacp.lacp:
+ redirect: cisco.iosxr.network.iosxr.argspec.lacp.lacp
+ network.iosxr.argspec.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lacp_interfaces
+ network.iosxr.argspec.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lacp_interfaces.lacp_interfaces
+ network.iosxr.argspec.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lag_interfaces
+ network.iosxr.argspec.lag_interfaces.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lag_interfaces.lag_interfaces
+ network.iosxr.argspec.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.argspec.lldp_global
+ network.iosxr.argspec.lldp_global.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.argspec.lldp_global.lldp_global
+ network.iosxr.argspec.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lldp_interfaces
+ network.iosxr.argspec.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lldp_interfaces.lldp_interfaces
+ network.iosxr.config:
+ redirect: cisco.iosxr.network.iosxr.config
+ network.iosxr.config.interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.interfaces
+ network.iosxr.config.interfaces.interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.interfaces.interfaces
+ network.iosxr.config.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.l2_interfaces
+ network.iosxr.config.l2_interfaces.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.l2_interfaces.l2_interfaces
+ network.iosxr.config.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.l3_interfaces
+ network.iosxr.config.l3_interfaces.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.l3_interfaces.l3_interfaces
+ network.iosxr.config.lacp:
+ redirect: cisco.iosxr.network.iosxr.config.lacp
+ network.iosxr.config.lacp.lacp:
+ redirect: cisco.iosxr.network.iosxr.config.lacp.lacp
+ network.iosxr.config.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lacp_interfaces
+ network.iosxr.config.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lacp_interfaces.lacp_interfaces
+ network.iosxr.config.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lag_interfaces
+ network.iosxr.config.lag_interfaces.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lag_interfaces.lag_interfaces
+ network.iosxr.config.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.config.lldp_global
+ network.iosxr.config.lldp_global.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.config.lldp_global.lldp_global
+ network.iosxr.config.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lldp_interfaces
+ network.iosxr.config.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lldp_interfaces.lldp_interfaces
+ network.iosxr.facts:
+ redirect: cisco.iosxr.network.iosxr.facts
+ network.iosxr.facts.facts:
+ redirect: cisco.iosxr.network.iosxr.facts.facts
+ network.iosxr.facts.interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.interfaces
+ network.iosxr.facts.interfaces.interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.interfaces.interfaces
+ network.iosxr.facts.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.l2_interfaces
+ network.iosxr.facts.l2_interfaces.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.l2_interfaces.l2_interfaces
+ network.iosxr.facts.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.l3_interfaces
+ network.iosxr.facts.l3_interfaces.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.l3_interfaces.l3_interfaces
+ network.iosxr.facts.lacp:
+ redirect: cisco.iosxr.network.iosxr.facts.lacp
+ network.iosxr.facts.lacp.lacp:
+ redirect: cisco.iosxr.network.iosxr.facts.lacp.lacp
+ network.iosxr.facts.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lacp_interfaces
+ network.iosxr.facts.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lacp_interfaces.lacp_interfaces
+ network.iosxr.facts.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lag_interfaces
+ network.iosxr.facts.lag_interfaces.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lag_interfaces.lag_interfaces
+ network.iosxr.facts.legacy:
+ redirect: cisco.iosxr.network.iosxr.facts.legacy
+ network.iosxr.facts.legacy.base:
+ redirect: cisco.iosxr.network.iosxr.facts.legacy.base
+ network.iosxr.facts.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.facts.lldp_global
+ network.iosxr.facts.lldp_global.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.facts.lldp_global.lldp_global
+ network.iosxr.facts.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lldp_interfaces
+ network.iosxr.facts.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lldp_interfaces.lldp_interfaces
+ network.iosxr.iosxr:
+ redirect: cisco.iosxr.network.iosxr.iosxr
+ network.iosxr.providers:
+ redirect: cisco.iosxr.network.iosxr.providers
+ network.iosxr.providers.cli:
+ redirect: cisco.iosxr.network.iosxr.providers.cli
+ network.iosxr.providers.cli.config:
+ redirect: cisco.iosxr.network.iosxr.providers.cli.config
+ network.iosxr.providers.cli.config.bgp:
+ redirect: cisco.iosxr.network.iosxr.providers.cli.config.bgp
+ network.iosxr.providers.cli.config.bgp.address_family:
+ redirect: cisco.iosxr.network.iosxr.providers.cli.config.bgp.address_family
+ network.iosxr.providers.cli.config.bgp.neighbors:
+ redirect: cisco.iosxr.network.iosxr.providers.cli.config.bgp.neighbors
+ network.iosxr.providers.cli.config.bgp.process:
+ redirect: cisco.iosxr.network.iosxr.providers.cli.config.bgp.process
+ network.iosxr.providers.module:
+ redirect: cisco.iosxr.network.iosxr.providers.module
+ network.iosxr.providers.providers:
+ redirect: cisco.iosxr.network.iosxr.providers.providers
+ network.iosxr.utils:
+ redirect: cisco.iosxr.network.iosxr.utils
+ network.iosxr.utils.utils:
+ redirect: cisco.iosxr.network.iosxr.utils.utils
+ network.ironware:
+ redirect: community.network.network.ironware
+ network.ironware.ironware:
+ redirect: community.network.network.ironware.ironware
+ network.junos:
+ redirect: junipernetworks.junos.network.junos
+ network.junos.argspec:
+ redirect: junipernetworks.junos.network.junos.argspec
+ network.junos.argspec.facts:
+ redirect: junipernetworks.junos.network.junos.argspec.facts
+ network.junos.argspec.facts.facts:
+ redirect: junipernetworks.junos.network.junos.argspec.facts.facts
+ network.junos.argspec.interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.interfaces
+ network.junos.argspec.interfaces.interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.interfaces.interfaces
+ network.junos.argspec.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.l2_interfaces
+ network.junos.argspec.l2_interfaces.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.l2_interfaces.l2_interfaces
+ network.junos.argspec.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.l3_interfaces
+ network.junos.argspec.l3_interfaces.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.l3_interfaces.l3_interfaces
+ network.junos.argspec.lacp:
+ redirect: junipernetworks.junos.network.junos.argspec.lacp
+ network.junos.argspec.lacp.lacp:
+ redirect: junipernetworks.junos.network.junos.argspec.lacp.lacp
+ network.junos.argspec.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lacp_interfaces
+ network.junos.argspec.lacp_interfaces.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lacp_interfaces.lacp_interfaces
+ network.junos.argspec.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lag_interfaces
+ network.junos.argspec.lag_interfaces.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lag_interfaces.lag_interfaces
+ network.junos.argspec.lldp_global:
+ redirect: junipernetworks.junos.network.junos.argspec.lldp_global
+ network.junos.argspec.lldp_global.lldp_global:
+ redirect: junipernetworks.junos.network.junos.argspec.lldp_global.lldp_global
+ network.junos.argspec.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lldp_interfaces
+ network.junos.argspec.lldp_interfaces.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lldp_interfaces.lldp_interfaces
+ network.junos.argspec.vlans:
+ redirect: junipernetworks.junos.network.junos.argspec.vlans
+ network.junos.argspec.vlans.vlans:
+ redirect: junipernetworks.junos.network.junos.argspec.vlans.vlans
+ network.junos.config:
+ redirect: junipernetworks.junos.network.junos.config
+ network.junos.config.interfaces:
+ redirect: junipernetworks.junos.network.junos.config.interfaces
+ network.junos.config.interfaces.interfaces:
+ redirect: junipernetworks.junos.network.junos.config.interfaces.interfaces
+ network.junos.config.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.l2_interfaces
+ network.junos.config.l2_interfaces.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.l2_interfaces.l2_interfaces
+ network.junos.config.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.l3_interfaces
+ network.junos.config.l3_interfaces.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.l3_interfaces.l3_interfaces
+ network.junos.config.lacp:
+ redirect: junipernetworks.junos.network.junos.config.lacp
+ network.junos.config.lacp.lacp:
+ redirect: junipernetworks.junos.network.junos.config.lacp.lacp
+ network.junos.config.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lacp_interfaces
+ network.junos.config.lacp_interfaces.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lacp_interfaces.lacp_interfaces
+ network.junos.config.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lag_interfaces
+ network.junos.config.lag_interfaces.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lag_interfaces.lag_interfaces
+ network.junos.config.lldp_global:
+ redirect: junipernetworks.junos.network.junos.config.lldp_global
+ network.junos.config.lldp_global.lldp_global:
+ redirect: junipernetworks.junos.network.junos.config.lldp_global.lldp_global
+ network.junos.config.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lldp_interfaces
+ network.junos.config.lldp_interfaces.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lldp_interfaces.lldp_interfaces
+ network.junos.config.vlans:
+ redirect: junipernetworks.junos.network.junos.config.vlans
+ network.junos.config.vlans.vlans:
+ redirect: junipernetworks.junos.network.junos.config.vlans.vlans
+ network.junos.facts:
+ redirect: junipernetworks.junos.network.junos.facts
+ network.junos.facts.facts:
+ redirect: junipernetworks.junos.network.junos.facts.facts
+ network.junos.facts.interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.interfaces
+ network.junos.facts.interfaces.interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.interfaces.interfaces
+ network.junos.facts.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.l2_interfaces
+ network.junos.facts.l2_interfaces.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.l2_interfaces.l2_interfaces
+ network.junos.facts.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.l3_interfaces
+ network.junos.facts.l3_interfaces.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.l3_interfaces.l3_interfaces
+ network.junos.facts.lacp:
+ redirect: junipernetworks.junos.network.junos.facts.lacp
+ network.junos.facts.lacp.lacp:
+ redirect: junipernetworks.junos.network.junos.facts.lacp.lacp
+ network.junos.facts.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lacp_interfaces
+ network.junos.facts.lacp_interfaces.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lacp_interfaces.lacp_interfaces
+ network.junos.facts.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lag_interfaces
+ network.junos.facts.lag_interfaces.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lag_interfaces.lag_interfaces
+ network.junos.facts.legacy:
+ redirect: junipernetworks.junos.network.junos.facts.legacy
+ network.junos.facts.legacy.base:
+ redirect: junipernetworks.junos.network.junos.facts.legacy.base
+ network.junos.facts.lldp_global:
+ redirect: junipernetworks.junos.network.junos.facts.lldp_global
+ network.junos.facts.lldp_global.lldp_global:
+ redirect: junipernetworks.junos.network.junos.facts.lldp_global.lldp_global
+ network.junos.facts.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lldp_interfaces
+ network.junos.facts.lldp_interfaces.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lldp_interfaces.lldp_interfaces
+ network.junos.facts.vlans:
+ redirect: junipernetworks.junos.network.junos.facts.vlans
+ network.junos.facts.vlans.vlans:
+ redirect: junipernetworks.junos.network.junos.facts.vlans.vlans
+ network.junos.junos:
+ redirect: junipernetworks.junos.network.junos.junos
+ network.junos.utils:
+ redirect: junipernetworks.junos.network.junos.utils
+ network.junos.utils.utils:
+ redirect: junipernetworks.junos.network.junos.utils.utils
+ network.meraki:
+ redirect: cisco.meraki.network.meraki
+ network.meraki.meraki:
+ redirect: cisco.meraki.network.meraki.meraki
+ network.netconf:
+ redirect: ansible.netcommon.network.netconf
+ network.netconf.netconf:
+ redirect: ansible.netcommon.network.netconf.netconf
+ network.netscaler:
+ redirect: community.network.network.netscaler
+ network.netscaler.netscaler:
+ redirect: community.network.network.netscaler.netscaler
+ network.netvisor:
+ redirect: community.network.network.netvisor
+ network.netvisor.netvisor:
+ redirect: community.network.network.netvisor.netvisor
+ network.netvisor.pn_nvos:
+ redirect: community.network.network.netvisor.pn_nvos
+ network.nos:
+ redirect: community.network.network.nos
+ network.nos.nos:
+ redirect: community.network.network.nos.nos
+ network.nso:
+ redirect: cisco.nso.nso
+ network.nso.nso:
+ redirect: cisco.nso.nso
+ network.nxos:
+ redirect: cisco.nxos.network.nxos
+ network.nxos.argspec:
+ redirect: cisco.nxos.network.nxos.argspec
+ network.nxos.argspec.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.bfd_interfaces
+ network.nxos.argspec.bfd_interfaces.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.bfd_interfaces.bfd_interfaces
+ network.nxos.argspec.facts:
+ redirect: cisco.nxos.network.nxos.argspec.facts
+ network.nxos.argspec.facts.facts:
+ redirect: cisco.nxos.network.nxos.argspec.facts.facts
+ network.nxos.argspec.interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.interfaces
+ network.nxos.argspec.interfaces.interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.interfaces.interfaces
+ network.nxos.argspec.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.l2_interfaces
+ network.nxos.argspec.l2_interfaces.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.l2_interfaces.l2_interfaces
+ network.nxos.argspec.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.l3_interfaces
+ network.nxos.argspec.l3_interfaces.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.l3_interfaces.l3_interfaces
+ network.nxos.argspec.lacp:
+ redirect: cisco.nxos.network.nxos.argspec.lacp
+ network.nxos.argspec.lacp.lacp:
+ redirect: cisco.nxos.network.nxos.argspec.lacp.lacp
+ network.nxos.argspec.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.lacp_interfaces
+ network.nxos.argspec.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.lacp_interfaces.lacp_interfaces
+ network.nxos.argspec.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.lag_interfaces
+ network.nxos.argspec.lag_interfaces.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.lag_interfaces.lag_interfaces
+ network.nxos.argspec.lldp_global:
+ redirect: cisco.nxos.network.nxos.argspec.lldp_global
+ network.nxos.argspec.lldp_global.lldp_global:
+ redirect: cisco.nxos.network.nxos.argspec.lldp_global.lldp_global
+ network.nxos.argspec.telemetry:
+ redirect: cisco.nxos.network.nxos.argspec.telemetry
+ network.nxos.argspec.telemetry.telemetry:
+ redirect: cisco.nxos.network.nxos.argspec.telemetry.telemetry
+ network.nxos.argspec.vlans:
+ redirect: cisco.nxos.network.nxos.argspec.vlans
+ network.nxos.argspec.vlans.vlans:
+ redirect: cisco.nxos.network.nxos.argspec.vlans.vlans
+ network.nxos.cmdref:
+ redirect: cisco.nxos.network.nxos.cmdref
+ network.nxos.cmdref.telemetry:
+ redirect: cisco.nxos.network.nxos.cmdref.telemetry
+ network.nxos.cmdref.telemetry.telemetry:
+ redirect: cisco.nxos.network.nxos.cmdref.telemetry.telemetry
+ network.nxos.config:
+ redirect: cisco.nxos.network.nxos.config
+ network.nxos.config.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.config.bfd_interfaces
+ network.nxos.config.bfd_interfaces.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.config.bfd_interfaces.bfd_interfaces
+ network.nxos.config.interfaces:
+ redirect: cisco.nxos.network.nxos.config.interfaces
+ network.nxos.config.interfaces.interfaces:
+ redirect: cisco.nxos.network.nxos.config.interfaces.interfaces
+ network.nxos.config.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.config.l2_interfaces
+ network.nxos.config.l2_interfaces.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.config.l2_interfaces.l2_interfaces
+ network.nxos.config.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.config.l3_interfaces
+ network.nxos.config.l3_interfaces.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.config.l3_interfaces.l3_interfaces
+ network.nxos.config.lacp:
+ redirect: cisco.nxos.network.nxos.config.lacp
+ network.nxos.config.lacp.lacp:
+ redirect: cisco.nxos.network.nxos.config.lacp.lacp
+ network.nxos.config.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.config.lacp_interfaces
+ network.nxos.config.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.config.lacp_interfaces.lacp_interfaces
+ network.nxos.config.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.config.lag_interfaces
+ network.nxos.config.lag_interfaces.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.config.lag_interfaces.lag_interfaces
+ network.nxos.config.lldp_global:
+ redirect: cisco.nxos.network.nxos.config.lldp_global
+ network.nxos.config.lldp_global.lldp_global:
+ redirect: cisco.nxos.network.nxos.config.lldp_global.lldp_global
+ network.nxos.config.telemetry:
+ redirect: cisco.nxos.network.nxos.config.telemetry
+ network.nxos.config.telemetry.telemetry:
+ redirect: cisco.nxos.network.nxos.config.telemetry.telemetry
+ network.nxos.config.vlans:
+ redirect: cisco.nxos.network.nxos.config.vlans
+ network.nxos.config.vlans.vlans:
+ redirect: cisco.nxos.network.nxos.config.vlans.vlans
+ network.nxos.facts:
+ redirect: cisco.nxos.network.nxos.facts
+ network.nxos.facts.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.bfd_interfaces
+ network.nxos.facts.bfd_interfaces.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.bfd_interfaces.bfd_interfaces
+ network.nxos.facts.facts:
+ redirect: cisco.nxos.network.nxos.facts.facts
+ network.nxos.facts.interfaces:
+ redirect: cisco.nxos.network.nxos.facts.interfaces
+ network.nxos.facts.interfaces.interfaces:
+ redirect: cisco.nxos.network.nxos.facts.interfaces.interfaces
+ network.nxos.facts.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.l2_interfaces
+ network.nxos.facts.l2_interfaces.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.l2_interfaces.l2_interfaces
+ network.nxos.facts.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.l3_interfaces
+ network.nxos.facts.l3_interfaces.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.l3_interfaces.l3_interfaces
+ network.nxos.facts.lacp:
+ redirect: cisco.nxos.network.nxos.facts.lacp
+ network.nxos.facts.lacp.lacp:
+ redirect: cisco.nxos.network.nxos.facts.lacp.lacp
+ network.nxos.facts.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.lacp_interfaces
+ network.nxos.facts.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.lacp_interfaces.lacp_interfaces
+ network.nxos.facts.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.lag_interfaces
+ network.nxos.facts.lag_interfaces.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.lag_interfaces.lag_interfaces
+ network.nxos.facts.legacy:
+ redirect: cisco.nxos.network.nxos.facts.legacy
+ network.nxos.facts.legacy.base:
+ redirect: cisco.nxos.network.nxos.facts.legacy.base
+ network.nxos.facts.lldp_global:
+ redirect: cisco.nxos.network.nxos.facts.lldp_global
+ network.nxos.facts.lldp_global.lldp_global:
+ redirect: cisco.nxos.network.nxos.facts.lldp_global.lldp_global
+ network.nxos.facts.telemetry:
+ redirect: cisco.nxos.network.nxos.facts.telemetry
+ network.nxos.facts.telemetry.telemetry:
+ redirect: cisco.nxos.network.nxos.facts.telemetry.telemetry
+ network.nxos.facts.vlans:
+ redirect: cisco.nxos.network.nxos.facts.vlans
+ network.nxos.facts.vlans.vlans:
+ redirect: cisco.nxos.network.nxos.facts.vlans.vlans
+ network.nxos.nxos:
+ redirect: cisco.nxos.network.nxos.nxos
+ network.nxos.utils:
+ redirect: cisco.nxos.network.nxos.utils
+ network.nxos.utils.telemetry:
+ redirect: cisco.nxos.network.nxos.utils.telemetry
+ network.nxos.utils.telemetry.telemetry:
+ redirect: cisco.nxos.network.nxos.utils.telemetry.telemetry
+ network.nxos.utils.utils:
+ redirect: cisco.nxos.network.nxos.utils.utils
+ network.onyx:
+ redirect: mellanox.onyx.network.onyx
+ network.onyx.onyx:
+ redirect: mellanox.onyx.network.onyx.onyx
+ network.ordnance:
+ redirect: community.network.network.ordnance
+ network.ordnance.ordnance:
+ redirect: community.network.network.ordnance.ordnance
+ network.panos:
+ redirect: community.network.network.panos
+ network.panos.panos:
+ redirect: community.network.network.panos.panos
+ network.restconf:
+ redirect: ansible.netcommon.network.restconf
+ network.restconf.restconf:
+ redirect: ansible.netcommon.network.restconf.restconf
+ network.routeros:
+ redirect: community.routeros.routeros
+ network.routeros.routeros:
+ redirect: community.routeros.routeros
+ network.skydive:
+ redirect: community.skydive.network.skydive
+ network.skydive.api:
+ redirect: community.skydive.network.skydive.api
+ network.slxos:
+ redirect: community.network.network.slxos
+ network.slxos.slxos:
+ redirect: community.network.network.slxos.slxos
+ network.sros:
+ redirect: community.network.network.sros
+ network.sros.sros:
+ redirect: community.network.network.sros.sros
+ network.voss:
+ redirect: community.network.network.voss
+ network.voss.voss:
+ redirect: community.network.network.voss.voss
+ network.vyos:
+ redirect: vyos.vyos.network.vyos
+ network.vyos.argspec:
+ redirect: vyos.vyos.network.vyos.argspec
+ network.vyos.argspec.facts:
+ redirect: vyos.vyos.network.vyos.argspec.facts
+ network.vyos.argspec.facts.facts:
+ redirect: vyos.vyos.network.vyos.argspec.facts.facts
+ network.vyos.argspec.interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.interfaces
+ network.vyos.argspec.interfaces.interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.interfaces.interfaces
+ network.vyos.argspec.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.l3_interfaces
+ network.vyos.argspec.l3_interfaces.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.l3_interfaces.l3_interfaces
+ network.vyos.argspec.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.lag_interfaces
+ network.vyos.argspec.lag_interfaces.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.lag_interfaces.lag_interfaces
+ network.vyos.argspec.lldp_global:
+ redirect: vyos.vyos.network.vyos.argspec.lldp_global
+ network.vyos.argspec.lldp_global.lldp_global:
+ redirect: vyos.vyos.network.vyos.argspec.lldp_global.lldp_global
+ network.vyos.argspec.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.lldp_interfaces
+ network.vyos.argspec.lldp_interfaces.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.lldp_interfaces.lldp_interfaces
+ network.vyos.config:
+ redirect: vyos.vyos.network.vyos.config
+ network.vyos.config.interfaces:
+ redirect: vyos.vyos.network.vyos.config.interfaces
+ network.vyos.config.interfaces.interfaces:
+ redirect: vyos.vyos.network.vyos.config.interfaces.interfaces
+ network.vyos.config.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.config.l3_interfaces
+ network.vyos.config.l3_interfaces.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.config.l3_interfaces.l3_interfaces
+ network.vyos.config.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.config.lag_interfaces
+ network.vyos.config.lag_interfaces.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.config.lag_interfaces.lag_interfaces
+ network.vyos.config.lldp_global:
+ redirect: vyos.vyos.network.vyos.config.lldp_global
+ network.vyos.config.lldp_global.lldp_global:
+ redirect: vyos.vyos.network.vyos.config.lldp_global.lldp_global
+ network.vyos.config.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.config.lldp_interfaces
+ network.vyos.config.lldp_interfaces.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.config.lldp_interfaces.lldp_interfaces
+ network.vyos.facts:
+ redirect: vyos.vyos.network.vyos.facts
+ network.vyos.facts.facts:
+ redirect: vyos.vyos.network.vyos.facts.facts
+ network.vyos.facts.interfaces:
+ redirect: vyos.vyos.network.vyos.facts.interfaces
+ network.vyos.facts.interfaces.interfaces:
+ redirect: vyos.vyos.network.vyos.facts.interfaces.interfaces
+ network.vyos.facts.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.l3_interfaces
+ network.vyos.facts.l3_interfaces.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.l3_interfaces.l3_interfaces
+ network.vyos.facts.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.lag_interfaces
+ network.vyos.facts.lag_interfaces.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.lag_interfaces.lag_interfaces
+ network.vyos.facts.legacy:
+ redirect: vyos.vyos.network.vyos.facts.legacy
+ network.vyos.facts.legacy.base:
+ redirect: vyos.vyos.network.vyos.facts.legacy.base
+ network.vyos.facts.lldp_global:
+ redirect: vyos.vyos.network.vyos.facts.lldp_global
+ network.vyos.facts.lldp_global.lldp_global:
+ redirect: vyos.vyos.network.vyos.facts.lldp_global.lldp_global
+ network.vyos.facts.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.lldp_interfaces
+ network.vyos.facts.lldp_interfaces.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.lldp_interfaces.lldp_interfaces
+ network.vyos.utils:
+ redirect: vyos.vyos.network.vyos.utils
+ network.vyos.utils.utils:
+ redirect: vyos.vyos.network.vyos.utils.utils
+ network.vyos.vyos:
+ redirect: vyos.vyos.network.vyos.vyos
+ oneandone:
+ redirect: community.general.oneandone
+ oneview:
+ redirect: community.general.oneview
+ online:
+ redirect: community.general.online
+ opennebula:
+ redirect: community.general.opennebula
+ openstack:
+ redirect: openstack.cloud.openstack
+ oracle:
+ redirect: community.general.oracle
+ oracle.oci_utils:
+ redirect: community.general.oracle.oci_utils
+ ovirt:
+ redirect: community.general._ovirt
+ podman:
+ redirect: containers.podman.podman
+ podman.common:
+ redirect: containers.podman.podman.common
+ postgres:
+ redirect: community.postgresql.postgres
+ pure:
+ redirect: community.general.pure
+ rabbitmq:
+ redirect: community.rabbitmq.rabbitmq
+ rax:
+ redirect: community.general.rax
+ redfish_utils:
+ redirect: community.general.redfish_utils
+ redhat:
+ redirect: community.general.redhat
+ remote_management.dellemc:
+ redirect: dellemc.openmanage
+ remote_management.dellemc.dellemc_idrac:
+ redirect: dellemc.openmanage.dellemc_idrac
+ remote_management.dellemc.ome:
+ redirect: dellemc.openmanage.ome
+ remote_management.intersight:
+ redirect: cisco.intersight.intersight
+ remote_management.lxca:
+ redirect: community.general.remote_management.lxca
+ remote_management.lxca.common:
+ redirect: community.general.remote_management.lxca.common
+ remote_management.ucs:
+ redirect: cisco.ucs.ucs
+ scaleway:
+ redirect: community.general.scaleway
+ service_now:
+ redirect: servicenow.servicenow.service_now
+ source_control:
+ redirect: community.general.source_control
+ source_control.bitbucket:
+ redirect: community.general.source_control.bitbucket
+ storage:
+ redirect: community.general.storage
+ storage.emc:
+ redirect: community.general.storage.emc
+ storage.emc.emc_vnx:
+ redirect: community.general.storage.emc.emc_vnx
+ storage.hpe3par:
+ redirect: community.general.storage.hpe3par
+ storage.hpe3par.hpe3par:
+ redirect: community.general.storage.hpe3par.hpe3par
+ univention_umc:
+ redirect: community.general.univention_umc
+ utm_utils:
+ redirect: community.general.utm_utils
+ vca:
+ redirect: community.vmware.vca
+ vexata:
+ redirect: community.general.vexata
+ vmware:
+ redirect: community.vmware.vmware
+ vmware_rest_client:
+ redirect: community.vmware.vmware_rest_client
+ vmware_spbm:
+ redirect: community.vmware.vmware_spbm
+ vultr:
+ redirect: ngine_io.vultr.vultr
+ xenserver:
+ redirect: community.general.xenserver
+ # end module_utils
+ cliconf:
+ frr:
+ redirect: frr.frr.frr
+ aireos:
+ redirect: community.network.aireos
+ apconos:
+ redirect: community.network.apconos
+ aruba:
+ redirect: community.network.aruba
+ ce:
+ redirect: community.network.ce
+ cnos:
+ redirect: community.network.cnos
+ edgeos:
+ redirect: community.network.edgeos
+ edgeswitch:
+ redirect: community.network.edgeswitch
+ enos:
+ redirect: community.network.enos
+ eric_eccli:
+ redirect: community.network.eric_eccli
+ exos:
+ redirect: community.network.exos
+ icx:
+ redirect: community.network.icx
+ ironware:
+ redirect: community.network.ironware
+ netvisor:
+ redirect: community.network.netvisor
+ nos:
+ redirect: community.network.nos
+ onyx:
+ redirect: mellanox.onyx.onyx
+ routeros:
+ redirect: community.routeros.routeros
+ slxos:
+ redirect: community.network.slxos
+ voss:
+ redirect: community.network.voss
+ eos:
+ redirect: arista.eos.eos
+ asa:
+ redirect: cisco.asa.asa
+ ios:
+ redirect: cisco.ios.ios
+ iosxr:
+ redirect: cisco.iosxr.iosxr
+ nxos:
+ redirect: cisco.nxos.nxos
+ junos:
+ redirect: junipernetworks.junos.junos
+ dellos10:
+ redirect: dellemc.os10.os10
+ dellos9:
+ redirect: dellemc.os9.os9
+ dellos6:
+ redirect: dellemc.os6.os6
+ vyos:
+ redirect: vyos.vyos.vyos
+ terminal:
+ frr:
+ redirect: frr.frr.frr
+ aireos:
+ redirect: community.network.aireos
+ apconos:
+ redirect: community.network.apconos
+ aruba:
+ redirect: community.network.aruba
+ ce:
+ redirect: community.network.ce
+ cnos:
+ redirect: community.network.cnos
+ edgeos:
+ redirect: community.network.edgeos
+ edgeswitch:
+ redirect: community.network.edgeswitch
+ enos:
+ redirect: community.network.enos
+ eric_eccli:
+ redirect: community.network.eric_eccli
+ exos:
+ redirect: community.network.exos
+ icx:
+ redirect: community.network.icx
+ ironware:
+ redirect: community.network.ironware
+ netvisor:
+ redirect: community.network.netvisor
+ nos:
+ redirect: community.network.nos
+ onyx:
+ redirect: mellanox.onyx.onyx
+ routeros:
+ redirect: community.routeros.routeros
+ slxos:
+ redirect: community.network.slxos
+ sros:
+ redirect: community.network.sros
+ voss:
+ redirect: community.network.voss
+ eos:
+ redirect: arista.eos.eos
+ asa:
+ redirect: cisco.asa.asa
+ ios:
+ redirect: cisco.ios.ios
+ iosxr:
+ redirect: cisco.iosxr.iosxr
+ nxos:
+ redirect: cisco.nxos.nxos
+ bigip:
+ redirect: f5networks.f5_modules.bigip
+ junos:
+ redirect: junipernetworks.junos.junos
+ dellos10:
+ redirect: dellemc.os10.os10
+ dellos9:
+ redirect: dellemc.os9.os9
+ dellos6:
+ redirect: dellemc.os6.os6
+ vyos:
+ redirect: vyos.vyos.vyos
+ action:
+ # test entry, overloaded with module of same name to use a different base action (i.e. not "normal.py")
+ uses_redirected_action:
+ redirect: testns.testcoll.subclassed_norm
+ aireos:
+ redirect: community.network.aireos
+ aruba:
+ redirect: community.network.aruba
+ ce:
+ redirect: community.network.ce
+ ce_template:
+ redirect: community.network.ce_template
+ cnos:
+ redirect: community.network.cnos
+ edgeos_config:
+ redirect: community.network.edgeos_config
+ enos:
+ redirect: community.network.enos
+ exos:
+ redirect: community.network.exos
+ ironware:
+ redirect: community.network.ironware
+ nos_config:
+ redirect: community.network.nos_config
+ onyx_config:
+ redirect: mellanox.onyx.onyx_config
+ slxos:
+ redirect: community.network.slxos
+ sros:
+ redirect: community.network.sros
+ voss:
+ redirect: community.network.voss
+ aws_s3:
+ redirect: amazon.aws.aws_s3
+ cli_command:
+ redirect: ansible.netcommon.cli_command
+ cli_config:
+ redirect: ansible.netcommon.cli_config
+ net_base:
+ redirect: ansible.netcommon.net_base
+ net_user:
+ redirect: ansible.netcommon.net_user
+ net_vlan:
+ redirect: ansible.netcommon.net_vlan
+ net_static_route:
+ redirect: ansible.netcommon.net_static_route
+ net_lldp:
+ redirect: ansible.netcommon.net_lldp
+ net_vrf:
+ redirect: ansible.netcommon.net_vrf
+ net_ping:
+ redirect: ansible.netcommon.net_ping
+ net_l3_interface:
+ redirect: ansible.netcommon.net_l3_interface
+ net_l2_interface:
+ redirect: ansible.netcommon.net_l2_interface
+ net_interface:
+ redirect: ansible.netcommon.net_interface
+ net_system:
+ redirect: ansible.netcommon.net_system
+ net_lldp_interface:
+ redirect: ansible.netcommon.net_lldp_interface
+ net_put:
+ redirect: ansible.netcommon.net_put
+ net_get:
+ redirect: ansible.netcommon.net_get
+ net_logging:
+ redirect: ansible.netcommon.net_logging
+ net_banner:
+ redirect: ansible.netcommon.net_banner
+ net_linkagg:
+ redirect: ansible.netcommon.net_linkagg
+ netconf:
+ redirect: ansible.netcommon.netconf
+ network:
+ redirect: ansible.netcommon.network
+ telnet:
+ redirect: ansible.netcommon.telnet
+ patch:
+ redirect: ansible.posix.patch
+ synchronize:
+ redirect: ansible.posix.synchronize
+ win_copy:
+ redirect: ansible.windows.win_copy
+ win_reboot:
+ redirect: ansible.windows.win_reboot
+ win_template:
+ redirect: ansible.windows.win_template
+ win_updates:
+ redirect: ansible.windows.win_updates
+ fortios_config:
+ redirect: fortinet.fortios.fortios_config
+ eos:
+ redirect: arista.eos.eos
+ asa:
+ redirect: cisco.asa.asa
+ ios:
+ redirect: cisco.ios.ios
+ iosxr:
+ redirect: cisco.iosxr.iosxr
+ nxos:
+ redirect: cisco.nxos.nxos
+ nxos_file_copy:
+ redirect: cisco.nxos.nxos_file_copy
+ bigip:
+ redirect: f5networks.f5_modules.bigip
+ bigiq:
+ redirect: f5networks.f5_modules.bigiq
+ junos:
+ redirect: junipernetworks.junos.junos
+ dellos10:
+ redirect: dellemc.os10.os10
+ dellos9:
+ redirect: dellemc.os9.os9
+ dellos6:
+ redirect: dellemc.os6.os6
+ vyos:
+ redirect: vyos.vyos.vyos
+ become:
+ doas:
+ redirect: community.general.doas
+ dzdo:
+ redirect: community.general.dzdo
+ ksu:
+ redirect: community.general.ksu
+ machinectl:
+ redirect: community.general.machinectl
+ pbrun:
+ redirect: community.general.pbrun
+ pfexec:
+ redirect: community.general.pfexec
+ pmrun:
+ redirect: community.general.pmrun
+ sesu:
+ redirect: community.general.sesu
+ enable:
+ redirect: ansible.netcommon.enable
+ cache:
+ memcached:
+ redirect: community.general.memcached
+ pickle:
+ redirect: community.general.pickle
+ redis:
+ redirect: community.general.redis
+ yaml:
+ redirect: community.general.yaml
+ mongodb:
+ redirect: community.mongodb.mongodb
+ callback:
+ actionable:
+ redirect: community.general.actionable
+ cgroup_memory_recap:
+ redirect: community.general.cgroup_memory_recap
+ context_demo:
+ redirect: community.general.context_demo
+ counter_enabled:
+ redirect: community.general.counter_enabled
+ dense:
+ redirect: community.general.dense
+ full_skip:
+ redirect: community.general.full_skip
+ hipchat:
+ redirect: community.general.hipchat
+ jabber:
+ redirect: community.general.jabber
+ log_plays:
+ redirect: community.general.log_plays
+ logdna:
+ redirect: community.general.logdna
+ logentries:
+ redirect: community.general.logentries
+ logstash:
+ redirect: community.general.logstash
+ mail:
+ redirect: community.general.mail
+ nrdp:
+ redirect: community.general.nrdp
+ 'null':
+ redirect: community.general.null
+ osx_say:
+ redirect: community.general.osx_say
+ say:
+ redirect: community.general.say
+ selective:
+ redirect: community.general.selective
+ slack:
+ redirect: community.general.slack
+ splunk:
+ redirect: community.general.splunk
+ stderr:
+ redirect: community.general.stderr
+ sumologic:
+ redirect: community.general.sumologic
+ syslog_json:
+ redirect: community.general.syslog_json
+ unixy:
+ redirect: community.general.unixy
+ yaml:
+ redirect: community.general.yaml
+ grafana_annotations:
+ redirect: community.grafana.grafana_annotations
+ aws_resource_actions:
+ redirect: amazon.aws.aws_resource_actions
+ cgroup_perf_recap:
+ redirect: ansible.posix.cgroup_perf_recap
+ debug:
+ redirect: ansible.posix.debug
+ json:
+ redirect: ansible.posix.json
+ profile_roles:
+ redirect: ansible.posix.profile_roles
+ profile_tasks:
+ redirect: ansible.posix.profile_tasks
+ skippy:
+ redirect: ansible.posix.skippy
+ timer:
+ redirect: ansible.posix.timer
+ foreman:
+ redirect: theforeman.foreman.foreman
+ # 'collections' integration test entries, do not remove
+ formerly_core_callback:
+ redirect: testns.testcoll.usercallback
+ formerly_core_removed_callback:
+ redirect: testns.testcoll.removedcallback
+ formerly_core_missing_callback:
+ redirect: bogusns.boguscoll.boguscallback
+ doc_fragments:
+ a10:
+ redirect: community.network.a10
+ aireos:
+ redirect: community.network.aireos
+ alicloud:
+ redirect: community.general.alicloud
+ aruba:
+ redirect: community.network.aruba
+ auth_basic:
+ redirect: community.general.auth_basic
+ avi:
+ redirect: community.network.avi
+ ce:
+ redirect: community.network.ce
+ cloudscale:
+ redirect: cloudscale_ch.cloud.api_parameters
+ cloudstack:
+ redirect: ngine_io.cloudstack.cloudstack
+ cnos:
+ redirect: community.network.cnos
+ digital_ocean:
+ redirect: community.digitalocean.digital_ocean
+ dimensiondata:
+ redirect: community.general.dimensiondata
+ dimensiondata_wait:
+ redirect: community.general.dimensiondata_wait
+ docker:
+ redirect: community.docker.docker
+ emc:
+ redirect: community.general.emc
+ enos:
+ redirect: community.network.enos
+ exoscale:
+ redirect: ngine_io.exoscale.exoscale
+ gcp:
+ redirect: google.cloud.gcp
+ hetzner:
+ redirect: community.hrobot.robot
+ hpe3par:
+ redirect: community.general.hpe3par
+ hwc:
+ redirect: community.general.hwc
+ ibm_storage:
+ redirect: community.general.ibm_storage
+ infinibox:
+ redirect: infinidat.infinibox.infinibox
+ influxdb:
+ redirect: community.general.influxdb
+ ingate:
+ redirect: community.network.ingate
+ ipa:
+ redirect: community.general.ipa
+ ironware:
+ redirect: community.network.ironware
+ keycloak:
+ redirect: community.general.keycloak
+ kubevirt_common_options:
+ redirect: community.kubevirt.kubevirt_common_options
+ kubevirt_vm_options:
+ redirect: community.kubevirt.kubevirt_vm_options
+ ldap:
+ redirect: community.general.ldap
+ lxca_common:
+ redirect: community.general.lxca_common
+ manageiq:
+ redirect: community.general.manageiq
+ mysql:
+ redirect: community.mysql.mysql
+ netscaler:
+ redirect: community.network.netscaler
+ nios:
+ redirect: community.general.nios
+ nso:
+ redirect: cisco.nso.nso
+ oneview:
+ redirect: community.general.oneview
+ online:
+ redirect: community.general.online
+ onyx:
+ redirect: mellanox.onyx.onyx
+ opennebula:
+ redirect: community.general.opennebula
+ openswitch:
+ redirect: community.general.openswitch
+ oracle:
+ redirect: community.general.oracle
+ oracle_creatable_resource:
+ redirect: community.general.oracle_creatable_resource
+ oracle_display_name_option:
+ redirect: community.general.oracle_display_name_option
+ oracle_name_option:
+ redirect: community.general.oracle_name_option
+ oracle_tags:
+ redirect: community.general.oracle_tags
+ oracle_wait_options:
+ redirect: community.general.oracle_wait_options
+ ovirt_facts:
+ redirect: community.general.ovirt_facts
+ panos:
+ redirect: community.network.panos
+ postgres:
+ redirect: community.postgresql.postgres
+ proxysql:
+ redirect: community.proxysql.proxysql
+ purestorage:
+ redirect: community.general.purestorage
+ rabbitmq:
+ redirect: community.rabbitmq.rabbitmq
+ rackspace:
+ redirect: community.general.rackspace
+ scaleway:
+ redirect: community.general.scaleway
+ sros:
+ redirect: community.network.sros
+ utm:
+ redirect: community.general.utm
+ vexata:
+ redirect: community.general.vexata
+ vultr:
+ redirect: ngine_io.vultr.vultr
+ xenserver:
+ redirect: community.general.xenserver
+ zabbix:
+ redirect: community.zabbix.zabbix
+ k8s_auth_options:
+ redirect: kubernetes.core.k8s_auth_options
+ k8s_name_options:
+ redirect: kubernetes.core.k8s_name_options
+ k8s_resource_options:
+ redirect: kubernetes.core.k8s_resource_options
+ k8s_scale_options:
+ redirect: kubernetes.core.k8s_scale_options
+ k8s_state_options:
+ redirect: kubernetes.core.k8s_state_options
+ acme:
+ redirect: community.crypto.acme
+ ecs_credential:
+ redirect: community.crypto.ecs_credential
+ VmwareRestModule:
+ redirect: vmware.vmware_rest.VmwareRestModule
+ VmwareRestModule_filters:
+ redirect: vmware.vmware_rest.VmwareRestModule_filters
+ VmwareRestModule_full:
+ redirect: vmware.vmware_rest.VmwareRestModule_full
+ VmwareRestModule_state:
+ redirect: vmware.vmware_rest.VmwareRestModule_state
+ vca:
+ redirect: community.vmware.vca
+ vmware:
+ redirect: community.vmware.vmware
+ vmware_rest_client:
+ redirect: community.vmware.vmware_rest_client
+ service_now:
+ redirect: servicenow.servicenow.service_now
+ aws:
+ redirect: amazon.aws.aws
+ aws_credentials:
+ redirect: amazon.aws.aws_credentials
+ aws_region:
+ redirect: amazon.aws.aws_region
+ ec2:
+ redirect: amazon.aws.ec2
+ netconf:
+ redirect: ansible.netcommon.netconf
+ network_agnostic:
+ redirect: ansible.netcommon.network_agnostic
+ fortios:
+ redirect: fortinet.fortios.fortios
+ netapp:
+ redirect: netapp.ontap.netapp
+ checkpoint_commands:
+ redirect: check_point.mgmt.checkpoint_commands
+ checkpoint_facts:
+ redirect: check_point.mgmt.checkpoint_facts
+ checkpoint_objects:
+ redirect: check_point.mgmt.checkpoint_objects
+ eos:
+ redirect: arista.eos.eos
+ aci:
+ redirect: cisco.aci.aci
+ asa:
+ redirect: cisco.asa.asa
+ intersight:
+ redirect: cisco.intersight.intersight
+ ios:
+ redirect: cisco.ios.ios
+ iosxr:
+ redirect: cisco.iosxr.iosxr
+ meraki:
+ redirect: cisco.meraki.meraki
+ mso:
+ redirect: cisco.mso.modules
+ nxos:
+ redirect: cisco.nxos.nxos
+ ucs:
+ redirect: cisco.ucs.ucs
+ f5:
+ redirect: f5networks.f5_modules.f5
+ openstack:
+ redirect: openstack.cloud.openstack
+ junos:
+ redirect: junipernetworks.junos.junos
+ tower:
+ redirect: awx.awx.auth
+ ovirt:
+ redirect: ovirt.ovirt.ovirt
+ ovirt_info:
+ redirect: ovirt.ovirt.ovirt_info
+ dellos10:
+ redirect: dellemc.os10.os10
+ dellos9:
+ redirect: dellemc.os9.os9
+ dellos6:
+ redirect: dellemc.os6.os6
+ hcloud:
+ redirect: hetzner.hcloud.hcloud
+ skydive:
+ redirect: community.skydive.skydive
+ azure:
+ redirect: azure.azcollection.azure
+ azure_tags:
+ redirect: azure.azcollection.azure_tags
+ vyos:
+ redirect: vyos.vyos.vyos
+ filter:
+ # test entries
+ formerly_core_filter:
+ redirect: ansible.builtin.bool
+ formerly_core_masked_filter:
+ redirect: ansible.builtin.bool
+ gcp_kms_encrypt:
+ redirect: google.cloud.gcp_kms_encrypt
+ gcp_kms_decrypt:
+ redirect: google.cloud.gcp_kms_decrypt
+ json_query:
+ redirect: community.general.json_query
+ random_mac:
+ redirect: community.general.random_mac
+ k8s_config_resource_name:
+ redirect: kubernetes.core.k8s_config_resource_name
+ cidr_merge:
+ redirect: ansible.netcommon.cidr_merge
+ ipaddr:
+ redirect: ansible.netcommon.ipaddr
+ ipmath:
+ redirect: ansible.netcommon.ipmath
+ ipwrap:
+ redirect: ansible.netcommon.ipwrap
+ ip4_hex:
+ redirect: ansible.netcommon.ip4_hex
+ ipv4:
+ redirect: ansible.netcommon.ipv4
+ ipv6:
+ redirect: ansible.netcommon.ipv6
+ ipsubnet:
+ redirect: ansible.netcommon.ipsubnet
+ next_nth_usable:
+ redirect: ansible.netcommon.next_nth_usable
+ network_in_network:
+ redirect: ansible.netcommon.network_in_network
+ network_in_usable:
+ redirect: ansible.netcommon.network_in_usable
+ reduce_on_network:
+ redirect: ansible.netcommon.reduce_on_network
+ nthhost:
+ redirect: ansible.netcommon.nthhost
+ previous_nth_usable:
+ redirect: ansible.netcommon.previous_nth_usable
+ slaac:
+ redirect: ansible.netcommon.slaac
+ hwaddr:
+ redirect: ansible.netcommon.hwaddr
+ parse_cli:
+ redirect: ansible.netcommon.parse_cli
+ parse_cli_textfsm:
+ redirect: ansible.netcommon.parse_cli_textfsm
+ parse_xml:
+ redirect: ansible.netcommon.parse_xml
+ type5_pw:
+ redirect: ansible.netcommon.type5_pw
+ hash_salt:
+ redirect: ansible.netcommon.hash_salt
+ comp_type5:
+ redirect: ansible.netcommon.comp_type5
+ vlan_parser:
+ redirect: ansible.netcommon.vlan_parser
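
A playbook that uses the legacy short filter names above keeps working through these redirects, with the fully qualified collection name as the explicit spelling. A minimal sketch, assuming a `users` list variable is defined:

    - name: The legacy short name resolves via the redirect above
      ansible.builtin.debug:
        msg: "{{ users | json_query('[].name') }}"   # same plugin as community.general.json_query
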
+ httpapi:
+ exos:
+ redirect: community.network.exos
+ fortianalyzer:
+ redirect: community.fortios.fortianalyzer
+ fortimanager:
+ redirect: fortinet.fortimanager.fortimanager
+ ftd:
+ redirect: community.network.ftd
+ vmware:
+ redirect: community.vmware.vmware
+ restconf:
+ redirect: ansible.netcommon.restconf
+ fortios:
+ redirect: fortinet.fortios.fortios
+ checkpoint:
+ redirect: check_point.mgmt.checkpoint
+ eos:
+ redirect: arista.eos.eos
+ nxos:
+ redirect: cisco.nxos.nxos
+ splunk:
+ redirect: splunk.es.splunk
+ qradar:
+ redirect: ibm.qradar.qradar
+ inventory:
+ # test entry
+ formerly_core_inventory:
+ redirect: testns.content_adj.statichost
+ cloudscale:
+ redirect: cloudscale_ch.cloud.inventory
+ docker_machine:
+ redirect: community.docker.docker_machine
+ docker_swarm:
+ redirect: community.docker.docker_swarm
+ gitlab_runners:
+ redirect: community.general.gitlab_runners
+ kubevirt:
+ redirect: community.kubevirt.kubevirt
+ linode:
+ redirect: community.general.linode
+ nmap:
+ redirect: community.general.nmap
+ online:
+ redirect: community.general.online
+ scaleway:
+ redirect: community.general.scaleway
+ virtualbox:
+ redirect: community.general.virtualbox
+ vultr:
+ redirect: ngine_io.vultr.vultr
+ k8s:
+ redirect: kubernetes.core.k8s
+ openshift:
+ redirect: kubernetes.core.openshift
+ vmware_vm_inventory:
+ redirect: community.vmware.vmware_vm_inventory
+ aws_ec2:
+ redirect: amazon.aws.aws_ec2
+ aws_rds:
+ redirect: amazon.aws.aws_rds
+ foreman:
+ redirect: theforeman.foreman.foreman
+ netbox:
+ redirect: netbox.netbox.nb_inventory
+ openstack:
+ redirect: openstack.cloud.openstack
+ tower:
+ redirect: awx.awx.tower
+ hcloud:
+ redirect: hetzner.hcloud.hcloud
+ gcp_compute:
+ redirect: google.cloud.gcp_compute
+ azure_rm:
+ redirect: azure.azcollection.azure_rm
+ lookup:
+ # test entry
+ formerly_core_lookup:
+ redirect: testns.testcoll.mylookup
+ avi:
+ redirect: community.network.avi
+ cartesian:
+ redirect: community.general.cartesian
+ chef_databag:
+ redirect: community.general.chef_databag
+ conjur_variable:
+ redirect: cyberark.conjur.conjur_variable
+ consul_kv:
+ redirect: community.general.consul_kv
+ credstash:
+ redirect: community.general.credstash
+ cyberarkpassword:
+ redirect: community.general.cyberarkpassword
+ dig:
+ redirect: community.general.dig
+ dnstxt:
+ redirect: community.general.dnstxt
+ etcd:
+ redirect: community.general.etcd
+ filetree:
+ redirect: community.general.filetree
+ flattened:
+ redirect: community.general.flattened
+ gcp_storage_file:
+ redirect: community.google.gcp_storage_file
+ hashi_vault:
+ redirect: community.hashi_vault.hashi_vault
+ hiera:
+ redirect: community.general.hiera
+ keyring:
+ redirect: community.general.keyring
+ lastpass:
+ redirect: community.general.lastpass
+ lmdb_kv:
+ redirect: community.general.lmdb_kv
+ manifold:
+ redirect: community.general.manifold
+ nios:
+ redirect: community.general.nios
+ nios_next_ip:
+ redirect: community.general.nios_next_ip
+ nios_next_network:
+ redirect: community.general.nios_next_network
+ onepassword:
+ redirect: community.general.onepassword
+ onepassword_raw:
+ redirect: community.general.onepassword_raw
+ passwordstore:
+ redirect: community.general.passwordstore
+ rabbitmq:
+ redirect: community.rabbitmq.rabbitmq
+ redis:
+ redirect: community.general.redis
+ shelvefile:
+ redirect: community.general.shelvefile
+ grafana_dashboard:
+ redirect: community.grafana.grafana_dashboard
+ openshift:
+ redirect: kubernetes.core.openshift
+ k8s:
+ redirect: kubernetes.core.k8s
+ mongodb:
+ redirect: community.mongodb.mongodb
+ laps_password:
+ redirect: community.windows.laps_password
+ aws_account_attribute:
+ redirect: amazon.aws.aws_account_attribute
+ aws_secret:
+ redirect: amazon.aws.aws_secret
+ aws_service_ip_ranges:
+ redirect: amazon.aws.aws_service_ip_ranges
+ aws_ssm:
+ redirect: amazon.aws.aws_ssm
+ skydive:
+ redirect: community.skydive.skydive
+ cpm_metering:
+ redirect: wti.remote.cpm_metering
+ cpm_status:
+ redirect: wti.remote.cpm_status
+ netconf:
+ ce:
+ redirect: community.network.ce
+ sros:
+ redirect: community.network.sros
+ default:
+ redirect: ansible.netcommon.default
+ iosxr:
+ redirect: cisco.iosxr.iosxr
+ junos:
+ redirect: junipernetworks.junos.junos
+ shell:
+ # test entry
+ formerly_core_powershell:
+ redirect: ansible.builtin.powershell
+ csh:
+ redirect: ansible.posix.csh
+ fish:
+ redirect: ansible.posix.fish
+ test:
+ # test entries
+ formerly_core_test:
+ redirect: ansible.builtin.search
+ formerly_core_masked_test:
+ redirect: ansible.builtin.search
+import_redirection:
+ # test entry
+ ansible.module_utils.formerly_core:
+ redirect: ansible_collections.testns.testcoll.plugins.module_utils.base
+ ansible.module_utils.known_hosts:
+ redirect: ansible_collections.community.general.plugins.module_utils.known_hosts
+ # ansible.builtin synthetic collection redirection hackery
+ ansible_collections.ansible.builtin.plugins.modules:
+ redirect: ansible.modules
+ ansible_collections.ansible.builtin.plugins.module_utils:
+ redirect: ansible.module_utils
+ ansible_collections.ansible.builtin.plugins:
+ redirect: ansible.plugins
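
These import redirections act at Python import time: legacy `ansible.module_utils` paths are rerouted to collection module_utils, and the synthetic `ansible.builtin` collection maps back onto core. A hedged sketch of the effect, using the real `known_hosts` entry above (the imported helper name comes from the legacy module_utils and is shown only for illustration):

    # Legacy module code written before the collection split:
    from ansible.module_utils.known_hosts import get_fqdn_and_port

    # Under the redirect above, the collection loader serves this import from
    # ansible_collections.community.general.plugins.module_utils.known_hosts,
    # so the old import path keeps working without any source change.
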
+action_groups:
+ testgroup:
+ # The list items under a group should always be action/module name strings except
+ # for a special 'metadata' dictionary.
+ # The only valid key currently for the metadata dictionary is 'extend_group', which is a
+ # list of other groups, the actions of which will be included in this group.
+ # (Note: it's still possible to also have a module/action named 'metadata' in the list)
+ # A short sketch showing how such a group is consumed follows this section.
+ - metadata:
+ extend_group:
+ - testns.testcoll.testgroup
+ - testns.testcoll.anothergroup
+ - testns.boguscoll.testgroup
+ - ping
+ - legacy_ping # Includes ansible.builtin.legacy_ping, not ansible.legacy.legacy_ping
+ - formerly_core_ping
+ testlegacy:
+ - ansible.legacy.legacy_ping
+ aws:
+ - metadata:
+ extend_group:
+ - amazon.aws.aws
+ - community.aws.aws
+ acme:
+ - metadata:
+ extend_group:
+ - community.crypto.acme
+ azure:
+ - metadata:
+ extend_group:
+ - azure.azcollection.azure
+ cpm:
+ - metadata:
+ extend_group:
+ - wti.remote.cpm
+ docker:
+ - metadata:
+ extend_group:
+ - community.general.docker
+ - community.docker.docker
+ gcp:
+ - metadata:
+ extend_group:
+ - google.cloud.gcp
+ k8s:
+ - metadata:
+ extend_group:
+ - community.kubernetes.k8s
+ - community.general.k8s
+ - community.kubevirt.k8s
+ - community.okd.k8s
+ - kubernetes.core.k8s
+ os:
+ - metadata:
+ extend_group:
+ - openstack.cloud.os
+ ovirt:
+ - metadata:
+ extend_group:
+ - ovirt.ovirt.ovirt
+ - community.general.ovirt
+ vmware:
+ - metadata:
+ extend_group:
+ - community.vmware.vmware
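
The groups defined here are consumed through `module_defaults`: a `group/<name>` key applies defaults to every action in the group, including actions pulled in via the `extend_group` metadata described above. A hedged playbook sketch (the region value is an assumption):

    - hosts: localhost
      module_defaults:
        group/aws:            # expands through the 'aws' group and its extend_group entries
          region: us-east-1   # assumed illustrative default, applied to each member action
      tasks:
        - amazon.aws.ec2_instance_info:
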
diff --git a/lib/ansible/config/base.yml b/lib/ansible/config/base.yml
new file mode 100644
index 0000000..664eb10
--- /dev/null
+++ b/lib/ansible/config/base.yml
@@ -0,0 +1,2067 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+---
+ANSIBLE_HOME:
+ name: The Ansible home path
+ description:
+ - The default root path for Ansible config files on the controller.
+ default: ~/.ansible
+ env:
+ - name: ANSIBLE_HOME
+ ini:
+ - key: home
+ section: defaults
+ type: path
+ version_added: '2.14'
+ANSIBLE_CONNECTION_PATH:
+ name: Path of ansible-connection script
+ default: null
+ description:
+ - Specify where to look for the ansible-connection script. This location will be checked before searching $PATH.
+ - If null, ansible will start by looking in the same directory as the ansible script.
+ type: path
+ env: [{name: ANSIBLE_CONNECTION_PATH}]
+ ini:
+ - {key: ansible_connection_path, section: persistent_connection}
+ yaml: {key: persistent_connection.ansible_connection_path}
+ version_added: "2.8"
+ANSIBLE_COW_SELECTION:
+ name: Cowsay filter selection
+ default: default
+ description: This allows you to choose a specific cowsay stencil for the banners, or use 'random' to cycle through them.
+ env: [{name: ANSIBLE_COW_SELECTION}]
+ ini:
+ - {key: cow_selection, section: defaults}
+ANSIBLE_COW_ACCEPTLIST:
+ name: Cowsay filter acceptance list
+ default: ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant', 'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep', 'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder', 'vader-koala', 'vader', 'www']
+ description: Accept list of cowsay templates that are 'safe' to use; set to an empty list if you want to enable all installed templates.
+ env:
+ - name: ANSIBLE_COW_WHITELIST
+ deprecated:
+ why: normalizing names to new standard
+ version: "2.15"
+ alternatives: 'ANSIBLE_COW_ACCEPTLIST'
+ - name: ANSIBLE_COW_ACCEPTLIST
+ version_added: '2.11'
+ ini:
+ - key: cow_whitelist
+ section: defaults
+ deprecated:
+ why: normalizing names to new standard
+ version: "2.15"
+ alternatives: 'cowsay_enabled_stencils'
+ - key: cowsay_enabled_stencils
+ section: defaults
+ version_added: '2.11'
+ type: list
+ANSIBLE_FORCE_COLOR:
+ name: Force color output
+ default: False
+ description: This option forces color mode even when running without a TTY, or when the "nocolor" setting is True.
+ env: [{name: ANSIBLE_FORCE_COLOR}]
+ ini:
+ - {key: force_color, section: defaults}
+ type: boolean
+ yaml: {key: display.force_color}
+ANSIBLE_NOCOLOR:
+ name: Suppress color output
+ default: False
+ description: This setting allows you to suppress colorized output, which is normally used to give a better indication of failure and status information.
+ env:
+ - name: ANSIBLE_NOCOLOR
+ # this is generic convention for CLI programs
+ - name: NO_COLOR
+ version_added: '2.11'
+ ini:
+ - {key: nocolor, section: defaults}
+ type: boolean
+ yaml: {key: display.nocolor}
+ANSIBLE_NOCOWS:
+ name: Suppress cowsay output
+ default: False
+ description: If you have cowsay installed but want to avoid the 'cows' (why????), use this.
+ env: [{name: ANSIBLE_NOCOWS}]
+ ini:
+ - {key: nocows, section: defaults}
+ type: boolean
+ yaml: {key: display.i_am_no_fun}
+ANSIBLE_COW_PATH:
+ name: Set path to cowsay command
+ default: null
+ description: Specify a custom cowsay path or swap in your cowsay implementation of choice
+ env: [{name: ANSIBLE_COW_PATH}]
+ ini:
+ - {key: cowpath, section: defaults}
+ type: string
+ yaml: {key: display.cowpath}
+ANSIBLE_PIPELINING:
+ name: Connection pipelining
+ default: False
+ description:
+ - This is a global option; each connection plugin can override it either by providing more specific options or by not supporting pipelining at all.
+ - Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server,
+ by executing many Ansible modules without actual file transfer.
+ - It can result in a very significant performance improvement when enabled.
+ - "However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first
+ disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default."
+ - This setting will be disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled.
+ env:
+ - name: ANSIBLE_PIPELINING
+ ini:
+ - section: defaults
+ key: pipelining
+ - section: connection
+ key: pipelining
+ type: boolean
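
A minimal ansible.cfg sketch for the entry above; either declared ini location works, with `[defaults]` shown here:

    [defaults]
    pipelining = True
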
+ANY_ERRORS_FATAL:
+ name: Make Task failures fatal
+ default: False
+ description: Sets the default value for the any_errors_fatal keyword; if True, task failures will be considered fatal errors.
+ env:
+ - name: ANSIBLE_ANY_ERRORS_FATAL
+ ini:
+ - section: defaults
+ key: any_errors_fatal
+ type: boolean
+ yaml: {key: errors.any_task_errors_fatal}
+ version_added: "2.4"
+BECOME_ALLOW_SAME_USER:
+ name: Allow becoming the same user
+ default: False
+ description:
+ - This setting controls whether become is skipped when the remote user and the become user are the same, i.e. root sudo to root.
+ env: [{name: ANSIBLE_BECOME_ALLOW_SAME_USER}]
+ ini:
+ - {key: become_allow_same_user, section: privilege_escalation}
+ type: boolean
+ yaml: {key: privilege_escalation.become_allow_same_user}
+BECOME_PASSWORD_FILE:
+ name: Become password file
+ default: ~
+ description:
+ - 'The password file to use for the become plugin. --become-password-file.'
+ - If executable, it will be run and the resulting stdout will be used as the password.
+ env: [{name: ANSIBLE_BECOME_PASSWORD_FILE}]
+ ini:
+ - {key: become_password_file, section: defaults}
+ type: path
+ version_added: '2.12'
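
A hedged ansible.cfg sketch for the password-file setting above (the path is an assumption; if the file is executable its stdout is used as the password, otherwise its contents are read directly):

    [defaults]
    become_password_file = ~/.ansible/become_pass.sh
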
+AGNOSTIC_BECOME_PROMPT:
+ name: Display an agnostic become prompt
+ default: True
+ type: boolean
+ description: Display an agnostic become prompt instead of a prompt containing the become method supplied on the command line.
+ env: [{name: ANSIBLE_AGNOSTIC_BECOME_PROMPT}]
+ ini:
+ - {key: agnostic_become_prompt, section: privilege_escalation}
+ yaml: {key: privilege_escalation.agnostic_become_prompt}
+ version_added: "2.5"
+CACHE_PLUGIN:
+ name: Persistent Cache plugin
+ default: memory
+ description: Chooses which cache plugin to use; the default 'memory' is ephemeral.
+ env: [{name: ANSIBLE_CACHE_PLUGIN}]
+ ini:
+ - {key: fact_caching, section: defaults}
+ yaml: {key: facts.cache.plugin}
+CACHE_PLUGIN_CONNECTION:
+ name: Cache Plugin URI
+ default: ~
+ description: Defines connection or path information for the cache plugin
+ env: [{name: ANSIBLE_CACHE_PLUGIN_CONNECTION}]
+ ini:
+ - {key: fact_caching_connection, section: defaults}
+ yaml: {key: facts.cache.uri}
+CACHE_PLUGIN_PREFIX:
+ name: Cache Plugin table prefix
+ default: ansible_facts
+ description: Prefix to use for cache plugin files/tables
+ env: [{name: ANSIBLE_CACHE_PLUGIN_PREFIX}]
+ ini:
+ - {key: fact_caching_prefix, section: defaults}
+ yaml: {key: facts.cache.prefix}
+CACHE_PLUGIN_TIMEOUT:
+ name: Cache Plugin expiration timeout
+ default: 86400
+ description: Expiration timeout for the cache plugin data
+ env: [{name: ANSIBLE_CACHE_PLUGIN_TIMEOUT}]
+ ini:
+ - {key: fact_caching_timeout, section: defaults}
+ type: integer
+ yaml: {key: facts.cache.timeout}
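
The four cache settings above usually travel together. A hedged ansible.cfg sketch using the builtin 'jsonfile' cache plugin (the connection path is an assumption):

    [defaults]
    fact_caching = jsonfile
    fact_caching_connection = /tmp/ansible_facts   # directory for per-host JSON files
    fact_caching_prefix = ansible_facts
    fact_caching_timeout = 86400                   # seconds; one day
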
+COLLECTIONS_SCAN_SYS_PATH:
+ name: Scan PYTHONPATH for installed collections
+ description: A boolean to enable or disable scanning the sys.path for installed collections
+ default: true
+ type: boolean
+ env:
+ - {name: ANSIBLE_COLLECTIONS_SCAN_SYS_PATH}
+ ini:
+ - {key: collections_scan_sys_path, section: defaults}
+COLLECTIONS_PATHS:
+ name: ordered list of root paths for loading installed Ansible collections content
+ description: >
+ Colon separated paths in which Ansible will search for collections content.
+ Collections must be in nested *subdirectories*, not directly in these directories.
+ For example, if ``COLLECTIONS_PATHS`` includes ``'{{ ANSIBLE_HOME ~ "/collections" }}'``,
+ and you want to add ``my.collection`` to that directory, it must be saved as
+ ``'{{ ANSIBLE_HOME ~ "/collections/ansible_collections/my/collection" }}'``.
+ default: '{{ ANSIBLE_HOME ~ "/collections:/usr/share/ansible/collections" }}'
+ type: pathspec
+ env:
+ - name: ANSIBLE_COLLECTIONS_PATHS # TODO: Deprecate this and ini once PATH has been in a few releases.
+ - name: ANSIBLE_COLLECTIONS_PATH
+ version_added: '2.10'
+ ini:
+ - key: collections_paths
+ section: defaults
+ - key: collections_path
+ section: defaults
+ version_added: '2.10'
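
To make the nesting rule above concrete, a sketch with an assumed custom root: a collection must sit under an `ansible_collections/<namespace>/<name>` subtree, never directly in the configured directory.

    [defaults]
    collections_path = /opt/ansible/collections

    # required on-disk layout (assumed path):
    #   /opt/ansible/collections/ansible_collections/my/collection/galaxy.yml
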
+COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH:
+ name: Defines behavior when loading a collection that does not support the current Ansible version
+ description:
+ - Controls what happens when a collection is loaded that does not declare support for the running Ansible version (via the collection metadata key `requires_ansible`).
+ env: [{name: ANSIBLE_COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH}]
+ ini: [{key: collections_on_ansible_version_mismatch, section: defaults}]
+ choices: &basic_error
+ error: issue a 'fatal' error and stop the play
+ warning: issue a warning but continue
+ ignore: just continue silently
+ default: warning
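
For example, to fail hard instead of warning on a `requires_ansible` mismatch (a minimal ini sketch):

    [defaults]
    collections_on_ansible_version_mismatch = error
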
+_COLOR_DEFAULTS: &color
+ name: placeholder for color settings' defaults
+ choices: ['black', 'bright gray', 'blue', 'white', 'green', 'bright blue', 'cyan', 'bright green', 'red', 'bright cyan', 'purple', 'bright red', 'yellow', 'bright purple', 'dark gray', 'bright yellow', 'magenta', 'bright magenta', 'normal']
+COLOR_CHANGED:
+ <<: *color
+ name: Color for 'changed' task status
+ default: yellow
+ description: Defines the color to use on 'Changed' task status
+ env: [{name: ANSIBLE_COLOR_CHANGED}]
+ ini:
+ - {key: changed, section: colors}
+COLOR_CONSOLE_PROMPT:
+ <<: *color
+ name: "Color for ansible-console's prompt task status"
+ default: white
+ description: Defines the default color to use for ansible-console
+ env: [{name: ANSIBLE_COLOR_CONSOLE_PROMPT}]
+ ini:
+ - {key: console_prompt, section: colors}
+ version_added: "2.7"
+COLOR_DEBUG:
+ <<: *color
+ name: Color for debug statements
+ default: dark gray
+ description: Defines the color to use when emitting debug messages
+ env: [{name: ANSIBLE_COLOR_DEBUG}]
+ ini:
+ - {key: debug, section: colors}
+COLOR_DEPRECATE:
+ <<: *color
+ name: Color for deprecation messages
+ default: purple
+ description: Defines the color to use when emitting deprecation messages
+ env: [{name: ANSIBLE_COLOR_DEPRECATE}]
+ ini:
+ - {key: deprecate, section: colors}
+COLOR_DIFF_ADD:
+ <<: *color
+ name: Color for diff added display
+ default: green
+ description: Defines the color to use when showing added lines in diffs
+ env: [{name: ANSIBLE_COLOR_DIFF_ADD}]
+ ini:
+ - {key: diff_add, section: colors}
+ yaml: {key: display.colors.diff.add}
+COLOR_DIFF_LINES:
+ <<: *color
+ name: Color for diff lines display
+ default: cyan
+ description: Defines the color to use when showing diffs
+ env: [{name: ANSIBLE_COLOR_DIFF_LINES}]
+ ini:
+ - {key: diff_lines, section: colors}
+COLOR_DIFF_REMOVE:
+ <<: *color
+ name: Color for diff removed display
+ default: red
+ description: Defines the color to use when showing removed lines in diffs
+ env: [{name: ANSIBLE_COLOR_DIFF_REMOVE}]
+ ini:
+ - {key: diff_remove, section: colors}
+COLOR_ERROR:
+ <<: *color
+ name: Color for error messages
+ default: red
+ description: Defines the color to use when emitting error messages
+ env: [{name: ANSIBLE_COLOR_ERROR}]
+ ini:
+ - {key: error, section: colors}
+ yaml: {key: colors.error}
+COLOR_HIGHLIGHT:
+ <<: *color
+ name: Color for highlighting
+ default: white
+ description: Defines the color to use for highlighting
+ env: [{name: ANSIBLE_COLOR_HIGHLIGHT}]
+ ini:
+ - {key: highlight, section: colors}
+COLOR_OK:
+ <<: *color
+ name: Color for 'ok' task status
+ default: green
+ description: Defines the color to use when showing 'OK' task status
+ env: [{name: ANSIBLE_COLOR_OK}]
+ ini:
+ - {key: ok, section: colors}
+COLOR_SKIP:
+ <<: *color
+ name: Color for 'skip' task status
+ default: cyan
+ description: Defines the color to use when showing 'Skipped' task status
+ env: [{name: ANSIBLE_COLOR_SKIP}]
+ ini:
+ - {key: skip, section: colors}
+COLOR_UNREACHABLE:
+ <<: *color
+ name: Color for 'unreachable' host state
+ default: bright red
+ description: Defines the color to use on 'Unreachable' status
+ env: [{name: ANSIBLE_COLOR_UNREACHABLE}]
+ ini:
+ - {key: unreachable, section: colors}
+COLOR_VERBOSE:
+ <<: *color
+ name: Color for verbose messages
+ default: blue
+ description: Defines the color to use when emitting verbose messages, i.e. those that show with '-v's.
+ env: [{name: ANSIBLE_COLOR_VERBOSE}]
+ ini:
+ - {key: verbose, section: colors}
+COLOR_WARN:
+ <<: *color
+ name: Color for warning messages
+ default: bright purple
+ description: Defines the color to use when emitting warning messages
+ env: [{name: ANSIBLE_COLOR_WARN}]
+ ini:
+ - {key: warn, section: colors}
+CONNECTION_PASSWORD_FILE:
+ name: Connection password file
+ default: ~
+ description: 'The password file to use for the connection plugin. --connection-password-file.'
+ env: [{name: ANSIBLE_CONNECTION_PASSWORD_FILE}]
+ ini:
+ - {key: connection_password_file, section: defaults}
+ type: path
+ version_added: '2.12'
+COVERAGE_REMOTE_OUTPUT:
+ name: Sets the output directory and filename prefix to generate coverage run info.
+ description:
+ - Sets the output directory on the remote host to generate coverage reports to.
+ - Currently only used for remote coverage on PowerShell modules.
+ - This is for internal use only.
+ env:
+ - {name: _ANSIBLE_COVERAGE_REMOTE_OUTPUT}
+ vars:
+ - {name: _ansible_coverage_remote_output}
+ type: str
+ version_added: '2.9'
+COVERAGE_REMOTE_PATHS:
+ name: Sets the list of paths to run coverage for.
+ description:
+ - A list of paths for files on the Ansible controller to run coverage for when executing on the remote host.
+ - Only files that match the path glob will have their coverage collected.
+ - Multiple path globs can be specified and are separated by ``:``.
+ - Currently only used for remote coverage on PowerShell modules.
+ - This is for internal use only.
+ default: '*'
+ env:
+ - {name: _ANSIBLE_COVERAGE_REMOTE_PATH_FILTER}
+ type: str
+ version_added: '2.9'
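+# Illustrative value (globs are hypothetical): multiple path globs separated by ':', e.g.
+#   _ANSIBLE_COVERAGE_REMOTE_PATH_FILTER='*/module_utils/*:*/modules/*'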
+ACTION_WARNINGS:
+ name: Toggle action warnings
+ default: True
+ description:
+ - By default Ansible will issue a warning when one is received from a task action (module or action plugin).
+ - These warnings can be silenced by adjusting this setting to False.
+ env: [{name: ANSIBLE_ACTION_WARNINGS}]
+ ini:
+ - {key: action_warnings, section: defaults}
+ type: boolean
+ version_added: "2.5"
+LOCALHOST_WARNING:
+ name: Warning when using implicit inventory with only localhost
+ default: True
+ description:
+ - By default Ansible will issue a warning when there are no hosts in the
+ inventory.
+ - These warnings can be silenced by adjusting this setting to False.
+ env: [{name: ANSIBLE_LOCALHOST_WARNING}]
+ ini:
+ - {key: localhost_warning, section: defaults}
+ type: boolean
+ version_added: "2.6"
+INVENTORY_UNPARSED_WARNING:
+ name: Warning when no inventory files can be parsed, resulting in an implicit inventory with only localhost
+ default: True
+ description:
+ - By default Ansible will issue a warning when no inventory was loaded and notes that
+ it will use an implicit localhost-only inventory.
+ - These warnings can be silenced by adjusting this setting to False.
+ env: [{name: ANSIBLE_INVENTORY_UNPARSED_WARNING}]
+ ini:
+ - {key: inventory_unparsed_warning, section: inventory}
+ type: boolean
+ version_added: "2.14"
+DOC_FRAGMENT_PLUGIN_PATH:
+ name: documentation fragment plugins path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/doc_fragments:/usr/share/ansible/plugins/doc_fragments" }}'
+ description: Colon separated paths in which Ansible will search for Documentation Fragments Plugins.
+ env: [{name: ANSIBLE_DOC_FRAGMENT_PLUGINS}]
+ ini:
+ - {key: doc_fragment_plugins, section: defaults}
+ type: pathspec
+DEFAULT_ACTION_PLUGIN_PATH:
+ name: Action plugins path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/action:/usr/share/ansible/plugins/action" }}'
+ description: Colon separated paths in which Ansible will search for Action Plugins.
+ env: [{name: ANSIBLE_ACTION_PLUGINS}]
+ ini:
+ - {key: action_plugins, section: defaults}
+ type: pathspec
+ yaml: {key: plugins.action.path}
+DEFAULT_ALLOW_UNSAFE_LOOKUPS:
+ name: Allow unsafe lookups
+ default: False
+ description:
+ - "When enabled, this option allows lookup plugins (whether used in variables as ``{{lookup('foo')}}`` or as a loop as with_foo)
+ to return data that is not marked 'unsafe'."
+ - By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language,
+ as this could represent a security risk. This option is provided to allow for backward compatibility,
+ however users should first consider adding allow_unsafe=True to any lookups which may be expected to contain data which may be run
+ through the templating engine later.
+ env: []
+ ini:
+ - {key: allow_unsafe_lookups, section: defaults}
+ type: boolean
+ version_added: "2.2.3"
+DEFAULT_ASK_PASS:
+ name: Ask for the login password
+ default: False
+ description:
+ - This controls whether an Ansible playbook should prompt for a login password.
+ If using SSH keys for authentication, you probably do not need to change this setting.
+ env: [{name: ANSIBLE_ASK_PASS}]
+ ini:
+ - {key: ask_pass, section: defaults}
+ type: boolean
+ yaml: {key: defaults.ask_pass}
+DEFAULT_ASK_VAULT_PASS:
+ name: Ask for the vault password(s)
+ default: False
+ description:
+ - This controls whether an Ansible playbook should prompt for a vault password.
+ env: [{name: ANSIBLE_ASK_VAULT_PASS}]
+ ini:
+ - {key: ask_vault_pass, section: defaults}
+ type: boolean
+DEFAULT_BECOME:
+ name: Enable privilege escalation (become)
+ default: False
+ description: Toggles the use of privilege escalation, allowing you to 'become' another user after login.
+ env: [{name: ANSIBLE_BECOME}]
+ ini:
+ - {key: become, section: privilege_escalation}
+ type: boolean
+DEFAULT_BECOME_ASK_PASS:
+ name: Ask for the privilege escalation (become) password
+ default: False
+ description: Toggle to prompt for privilege escalation password.
+ env: [{name: ANSIBLE_BECOME_ASK_PASS}]
+ ini:
+ - {key: become_ask_pass, section: privilege_escalation}
+ type: boolean
+DEFAULT_BECOME_METHOD:
+ name: Choose privilege escalation method
+ default: 'sudo'
+ description: Privilege escalation method to use when `become` is enabled.
+ env: [{name: ANSIBLE_BECOME_METHOD}]
+ ini:
+ - {section: privilege_escalation, key: become_method}
+DEFAULT_BECOME_EXE:
+ name: Choose 'become' executable
+ default: ~
+ description: 'Executable to use for privilege escalation; otherwise Ansible will depend on PATH.'
+ env: [{name: ANSIBLE_BECOME_EXE}]
+ ini:
+ - {key: become_exe, section: privilege_escalation}
+DEFAULT_BECOME_FLAGS:
+ name: Set 'become' executable options
+ default: ~
+ description: Flags to pass to the privilege escalation executable.
+ env: [{name: ANSIBLE_BECOME_FLAGS}]
+ ini:
+ - {key: become_flags, section: privilege_escalation}
+BECOME_PLUGIN_PATH:
+ name: Become plugins path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/become:/usr/share/ansible/plugins/become" }}'
+ description: Colon separated paths in which Ansible will search for Become Plugins.
+ env: [{name: ANSIBLE_BECOME_PLUGINS}]
+ ini:
+ - {key: become_plugins, section: defaults}
+ type: pathspec
+ version_added: "2.8"
+DEFAULT_BECOME_USER:
+ # FIXME: should really be blank and make -u passing optional depending on it
+ name: Set the user you 'become' via privilege escalation
+ default: root
+ description: The user your login/remote user 'becomes' when using privilege escalation; most systems will use 'root' when no user is specified.
+ env: [{name: ANSIBLE_BECOME_USER}]
+ ini:
+ - {key: become_user, section: privilege_escalation}
+ yaml: {key: become.user}
+DEFAULT_CACHE_PLUGIN_PATH:
+ name: Cache Plugins Path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/cache:/usr/share/ansible/plugins/cache" }}'
+ description: Colon separated paths in which Ansible will search for Cache Plugins.
+ env: [{name: ANSIBLE_CACHE_PLUGINS}]
+ ini:
+ - {key: cache_plugins, section: defaults}
+ type: pathspec
+DEFAULT_CALLBACK_PLUGIN_PATH:
+ name: Callback Plugins Path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/callback:/usr/share/ansible/plugins/callback" }}'
+ description: Colon separated paths in which Ansible will search for Callback Plugins.
+ env: [{name: ANSIBLE_CALLBACK_PLUGINS}]
+ ini:
+ - {key: callback_plugins, section: defaults}
+ type: pathspec
+ yaml: {key: plugins.callback.path}
+CALLBACKS_ENABLED:
+ name: Enable callback plugins that require it.
+ default: []
+ description:
+ - "List of enabled callbacks, not all callbacks need enabling,
+ but many of those shipped with Ansible do as we don't want them activated by default."
+ env:
+ - name: ANSIBLE_CALLBACK_WHITELIST
+ deprecated:
+ why: normalizing names to new standard
+ version: "2.15"
+ alternatives: 'ANSIBLE_CALLBACKS_ENABLED'
+ - name: ANSIBLE_CALLBACKS_ENABLED
+ version_added: '2.11'
+ ini:
+ - key: callback_whitelist
+ section: defaults
+ deprecated:
+ why: normalizing names to new standard
+ version: "2.15"
+ alternatives: 'callbacks_enabled'
+ - key: callbacks_enabled
+ section: defaults
+ version_added: '2.11'
+ type: list
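+# Illustrative ansible.cfg snippet (the callback names are examples of callbacks that
+# require enabling, not a recommendation):
+#   [defaults]
+#   callbacks_enabled = timer, profile_tasks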
+DEFAULT_CLICONF_PLUGIN_PATH:
+ name: Cliconf Plugins Path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/cliconf:/usr/share/ansible/plugins/cliconf" }}'
+ description: Colon separated paths in which Ansible will search for Cliconf Plugins.
+ env: [{name: ANSIBLE_CLICONF_PLUGINS}]
+ ini:
+ - {key: cliconf_plugins, section: defaults}
+ type: pathspec
+DEFAULT_CONNECTION_PLUGIN_PATH:
+ name: Connection Plugins Path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/connection:/usr/share/ansible/plugins/connection" }}'
+ description: Colon separated paths in which Ansible will search for Connection Plugins.
+ env: [{name: ANSIBLE_CONNECTION_PLUGINS}]
+ ini:
+ - {key: connection_plugins, section: defaults}
+ type: pathspec
+ yaml: {key: plugins.connection.path}
+DEFAULT_DEBUG:
+ name: Debug mode
+ default: False
+ description:
+ - "Toggles debug output in Ansible. This is *very* verbose and can hinder
+ multiprocessing. Debug output can also include secret information
+ despite no_log settings being enabled, which means debug mode should not be used in
+ production."
+ env: [{name: ANSIBLE_DEBUG}]
+ ini:
+ - {key: debug, section: defaults}
+ type: boolean
+DEFAULT_EXECUTABLE:
+ name: Target shell executable
+ default: /bin/sh
+ description:
+ - "This indicates the command to use to spawn a shell under for Ansible's execution needs on a target.
+ Users may need to change this in rare instances when shell usage is constrained, but in most cases it may be left as is."
+ env: [{name: ANSIBLE_EXECUTABLE}]
+ ini:
+ - {key: executable, section: defaults}
+DEFAULT_FACT_PATH:
+ name: local fact path
+ description:
+ - "This option allows you to globally configure a custom path for 'local_facts' for the implied :ref:`ansible_collections.ansible.builtin.setup_module` task when using fact gathering."
+ - "If not set, it will fallback to the default from the ``ansible.builtin.setup`` module: ``/etc/ansible/facts.d``."
+ - "This does **not** affect user defined tasks that use the ``ansible.builtin.setup`` module."
+ - The real action being created by the implicit task is currently the ``ansible.legacy.gather_facts`` module, which then calls the configured fact modules;
+ by default this will be ``ansible.builtin.setup`` for POSIX systems but other platforms might have different defaults.
+ env: [{name: ANSIBLE_FACT_PATH}]
+ ini:
+ - {key: fact_path, section: defaults}
+ type: string
+ deprecated:
+ # TODO: when removing set playbook/play.py to default=None
+ why: the module_defaults keyword is a more generic version and can apply to all calls to the
+ M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
+ version: "2.18"
+ alternatives: module_defaults
+DEFAULT_FILTER_PLUGIN_PATH:
+ name: Jinja2 Filter Plugins Path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/filter:/usr/share/ansible/plugins/filter" }}'
+ description: Colon separated paths in which Ansible will search for Jinja2 Filter Plugins.
+ env: [{name: ANSIBLE_FILTER_PLUGINS}]
+ ini:
+ - {key: filter_plugins, section: defaults}
+ type: pathspec
+DEFAULT_FORCE_HANDLERS:
+ name: Force handlers to run after failure
+ default: False
+ description:
+ - This option controls if notified handlers run on a host even if a failure occurs on that host.
+ - When false, the handlers will not run if a failure has occurred on a host.
+ - This can also be set per play or on the command line. See Handlers and Failure for more details.
+ env: [{name: ANSIBLE_FORCE_HANDLERS}]
+ ini:
+ - {key: force_handlers, section: defaults}
+ type: boolean
+ version_added: "1.9.1"
+DEFAULT_FORKS:
+ name: Number of task forks
+ default: 5
+ description: Maximum number of forks Ansible will use to execute tasks on target hosts.
+ env: [{name: ANSIBLE_FORKS}]
+ ini:
+ - {key: forks, section: defaults}
+ type: integer
+DEFAULT_GATHERING:
+ name: Gathering behaviour
+ default: 'implicit'
+ description:
+ - This setting controls the default policy of fact gathering (facts discovered about remote systems).
+ - "This option can be useful for those wishing to save fact gathering time. Both 'smart' and 'explicit' will use the cache plugin."
+ env: [{name: ANSIBLE_GATHERING}]
+ ini:
+ - key: gathering
+ section: defaults
+ version_added: "1.6"
+ choices:
+ implicit: "the cache plugin will be ignored and facts will be gathered per play unless 'gather_facts: False' is set."
+ explicit: facts will not be gathered unless directly requested in the play.
+ smart: each new host that has no facts discovered will be scanned, but if the same host is addressed in multiple plays it will not be contacted again in the run.
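+# Illustrative ansible.cfg usage: only gather facts when a play explicitly requests it, e.g.
+#   [defaults]
+#   gathering = explicit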
+DEFAULT_GATHER_SUBSET:
+ name: Gather facts subset
+ description:
+ - Set the `gather_subset` option for the :ref:`ansible_collections.ansible.builtin.setup_module` task in the implicit fact gathering.
+ See the module documentation for specifics.
+ - "It does **not** apply to user defined ``ansible.builtin.setup`` tasks."
+ env: [{name: ANSIBLE_GATHER_SUBSET}]
+ ini:
+ - key: gather_subset
+ section: defaults
+ version_added: "2.1"
+ type: list
+ deprecated:
+ # TODO: when removing set playbook/play.py to default=None
+ why: the module_defaults keyword is a more generic version and can apply to all calls to the
+ M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
+ version: "2.18"
+ alternatives: module_defaults
+DEFAULT_GATHER_TIMEOUT:
+ name: Gather facts timeout
+ description:
+ - Set the timeout in seconds for the implicit fact gathering; see the module documentation for specifics.
+ - "It does **not** apply to user defined :ref:`ansible_collections.ansible.builtin.setup_module` tasks."
+ env: [{name: ANSIBLE_GATHER_TIMEOUT}]
+ ini:
+ - {key: gather_timeout, section: defaults}
+ type: integer
+ deprecated:
+ # TODO: when removing set playbook/play.py to default=None
+ why: the module_defaults keyword is a more generic version and can apply to all calls to the
+ M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
+ version: "2.18"
+ alternatives: module_defaults
+DEFAULT_HASH_BEHAVIOUR:
+ name: Hash merge behaviour
+ default: replace
+ type: string
+ choices:
+ replace: Any variable that is defined more than once is overwritten using the order from variable precedence rules (highest wins).
+ merge: Any dictionary variable will be recursively merged with new definitions across the different variable definition sources.
+ description:
+ - This setting controls how duplicate definitions of dictionary variables (aka hash, map, associative array) are handled in Ansible.
+ - This does not affect variables whose values are scalars (integers, strings) or arrays.
+ - "**WARNING**, changing this setting is not recommended as this is fragile and makes your content (plays, roles, collections) non portable,
+ leading to continual confusion and misuse. Don't change this setting unless you think you have an absolute need for it."
+ - We recommend avoiding reusing variable names and relying on the ``combine`` filter and ``vars`` and ``varnames`` lookups
+ to create merged versions of the individual variables. In our experience this is rarely really needed and a sign that too much
+ complexity has been introduced into the data structures and plays.
+ - For some uses you can also look into custom vars_plugins to merge on input, even substituting the default ``host_group_vars``
+ that is in charge of parsing the ``host_vars/`` and ``group_vars/`` directories. Most users of this setting are only interested in inventory scope,
+ but the setting itself affects all sources and makes debugging even harder.
+ - All playbooks and roles in the official examples repos assume the default for this setting.
+ - Changing the setting to ``merge`` applies across variable sources, but many sources will internally still overwrite the variables.
+ For example ``include_vars`` will dedupe variables internally before updating Ansible, with 'last defined' overwriting previous definitions in same file.
+ - The Ansible project recommends you **avoid ``merge`` for new projects.**
+ - It is the intention of the Ansible developers to eventually deprecate and remove this setting, but it is being kept as some users do heavily rely on it.
+ env: [{name: ANSIBLE_HASH_BEHAVIOUR}]
+ ini:
+ - {key: hash_behaviour, section: defaults}
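+# Illustrative sketch of the difference: given a lower-precedence 'opts: {a: 1}' and a
+# higher-precedence 'opts: {b: 2}', 'replace' yields opts == {b: 2} while 'merge'
+# yields opts == {a: 1, b: 2}.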
+DEFAULT_HOST_LIST:
+ name: Inventory Source
+ default: /etc/ansible/hosts
+ description: Comma separated list of Ansible inventory sources
+ env:
+ - name: ANSIBLE_INVENTORY
+ expand_relative_paths: True
+ ini:
+ - key: inventory
+ section: defaults
+ type: pathlist
+ yaml: {key: defaults.inventory}
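+# Illustrative usage (paths are hypothetical): multiple comma separated sources, e.g.
+#   ANSIBLE_INVENTORY=/etc/ansible/hosts,./inventories/staging.yml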
+DEFAULT_HTTPAPI_PLUGIN_PATH:
+ name: HttpApi Plugins Path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/httpapi:/usr/share/ansible/plugins/httpapi" }}'
+ description: Colon separated paths in which Ansible will search for HttpApi Plugins.
+ env: [{name: ANSIBLE_HTTPAPI_PLUGINS}]
+ ini:
+ - {key: httpapi_plugins, section: defaults}
+ type: pathspec
+DEFAULT_INTERNAL_POLL_INTERVAL:
+ name: Internal poll interval
+ default: 0.001
+ env: []
+ ini:
+ - {key: internal_poll_interval, section: defaults}
+ type: float
+ version_added: "2.2"
+ description:
+ - This sets the interval (in seconds) of Ansible internal processes polling each other.
+ Lower values improve performance with large playbooks at the expense of extra CPU load.
+ Higher values are more suitable for Ansible usage in automation scenarios,
+ when UI responsiveness is not required but CPU usage might be a concern.
+ - "The default corresponds to the value hardcoded in Ansible <= 2.1"
+DEFAULT_INVENTORY_PLUGIN_PATH:
+ name: Inventory Plugins Path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/inventory:/usr/share/ansible/plugins/inventory" }}'
+ description: Colon separated paths in which Ansible will search for Inventory Plugins.
+ env: [{name: ANSIBLE_INVENTORY_PLUGINS}]
+ ini:
+ - {key: inventory_plugins, section: defaults}
+ type: pathspec
+DEFAULT_JINJA2_EXTENSIONS:
+ name: Enabled Jinja2 extensions
+ default: []
+ description:
+ - This is a developer-specific feature that allows enabling additional Jinja2 extensions.
+ - "See the Jinja2 documentation for details. If you do not know what these do, you probably don't need to change this setting :)"
+ env: [{name: ANSIBLE_JINJA2_EXTENSIONS}]
+ ini:
+ - {key: jinja2_extensions, section: defaults}
+DEFAULT_JINJA2_NATIVE:
+ name: Use Jinja2's NativeEnvironment for templating
+ default: False
+ description: This option preserves variable types during template operations.
+ env: [{name: ANSIBLE_JINJA2_NATIVE}]
+ ini:
+ - {key: jinja2_native, section: defaults}
+ type: boolean
+ yaml: {key: jinja2_native}
+ version_added: 2.7
+DEFAULT_KEEP_REMOTE_FILES:
+ name: Keep remote files
+ default: False
+ description:
+ - When enabled, Ansible will keep, rather than clean up, the temporary files used to execute tasks on the remote.
+ - If this option is enabled it will disable ``ANSIBLE_PIPELINING``.
+ env: [{name: ANSIBLE_KEEP_REMOTE_FILES}]
+ ini:
+ - {key: keep_remote_files, section: defaults}
+ type: boolean
+DEFAULT_LIBVIRT_LXC_NOSECLABEL:
+ # TODO: move to plugin
+ name: No security label on Lxc
+ default: False
+ description:
+ - "This setting causes libvirt to connect to lxc containers by passing --noseclabel to virsh.
+ This is necessary when running on systems which do not have SELinux."
+ env:
+ - name: ANSIBLE_LIBVIRT_LXC_NOSECLABEL
+ ini:
+ - {key: libvirt_lxc_noseclabel, section: selinux}
+ type: boolean
+ version_added: "2.1"
+DEFAULT_LOAD_CALLBACK_PLUGINS:
+ name: Load callbacks for adhoc
+ default: False
+ description:
+ - Controls whether callback plugins are loaded when running /usr/bin/ansible.
+ This may be used to log activity from the command line, send notifications, and so on.
+ Callback plugins are always loaded for ``ansible-playbook``.
+ env: [{name: ANSIBLE_LOAD_CALLBACK_PLUGINS}]
+ ini:
+ - {key: bin_ansible_callbacks, section: defaults}
+ type: boolean
+ version_added: "1.8"
+DEFAULT_LOCAL_TMP:
+ name: Controller temporary directory
+ default: '{{ ANSIBLE_HOME ~ "/tmp" }}'
+ description: Temporary directory for Ansible to use on the controller.
+ env: [{name: ANSIBLE_LOCAL_TEMP}]
+ ini:
+ - {key: local_tmp, section: defaults}
+ type: tmppath
+DEFAULT_LOG_PATH:
+ name: Ansible log file path
+ default: ~
+ description: File to which Ansible will log on the controller. When empty, logging is disabled.
+ env: [{name: ANSIBLE_LOG_PATH}]
+ ini:
+ - {key: log_path, section: defaults}
+ type: path
+DEFAULT_LOG_FILTER:
+ name: Name filters for python logger
+ default: []
+ description: List of logger names to filter out of the log file
+ env: [{name: ANSIBLE_LOG_FILTER}]
+ ini:
+ - {key: log_filter, section: defaults}
+ type: list
+DEFAULT_LOOKUP_PLUGIN_PATH:
+ name: Lookup Plugins Path
+ description: Colon separated paths in which Ansible will search for Lookup Plugins.
+ default: '{{ ANSIBLE_HOME ~ "/plugins/lookup:/usr/share/ansible/plugins/lookup" }}'
+ env: [{name: ANSIBLE_LOOKUP_PLUGINS}]
+ ini:
+ - {key: lookup_plugins, section: defaults}
+ type: pathspec
+ yaml: {key: defaults.lookup_plugins}
+DEFAULT_MANAGED_STR:
+ name: Ansible managed
+ default: 'Ansible managed'
+ description: Sets the macro for the 'ansible_managed' variable available for :ref:`ansible_collections.ansible.builtin.template_module` and :ref:`ansible_collections.ansible.windows.win_template_module`. This is only relevant for those two modules.
+ env: []
+ ini:
+ - {key: ansible_managed, section: defaults}
+ yaml: {key: defaults.ansible_managed}
+DEFAULT_MODULE_ARGS:
+ name: Adhoc default arguments
+ default: ~
+ description:
+ - This sets the default arguments to pass to the ``ansible`` adhoc binary if no ``-a`` is specified.
+ env: [{name: ANSIBLE_MODULE_ARGS}]
+ ini:
+ - {key: module_args, section: defaults}
+DEFAULT_MODULE_COMPRESSION:
+ name: Python module compression
+ default: ZIP_DEFLATED
+ description: Compression scheme to use when transferring Python modules to the target.
+ env: []
+ ini:
+ - {key: module_compression, section: defaults}
+# vars:
+# - name: ansible_module_compression
+DEFAULT_MODULE_NAME:
+ name: Default adhoc module
+ default: command
+ description: "Module to use with the ``ansible`` AdHoc command, if none is specified via ``-m``."
+ env: []
+ ini:
+ - {key: module_name, section: defaults}
+DEFAULT_MODULE_PATH:
+ name: Modules Path
+ description: Colon separated paths in which Ansible will search for Modules.
+ default: '{{ ANSIBLE_HOME ~ "/plugins/modules:/usr/share/ansible/plugins/modules" }}'
+ env: [{name: ANSIBLE_LIBRARY}]
+ ini:
+ - {key: library, section: defaults}
+ type: pathspec
+DEFAULT_MODULE_UTILS_PATH:
+ name: Module Utils Path
+ description: Colon separated paths in which Ansible will search for Module utils files, which are shared by modules.
+ default: '{{ ANSIBLE_HOME ~ "/plugins/module_utils:/usr/share/ansible/plugins/module_utils" }}'
+ env: [{name: ANSIBLE_MODULE_UTILS}]
+ ini:
+ - {key: module_utils, section: defaults}
+ type: pathspec
+DEFAULT_NETCONF_PLUGIN_PATH:
+ name: Netconf Plugins Path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/netconf:/usr/share/ansible/plugins/netconf" }}'
+ description: Colon separated paths in which Ansible will search for Netconf Plugins.
+ env: [{name: ANSIBLE_NETCONF_PLUGINS}]
+ ini:
+ - {key: netconf_plugins, section: defaults}
+ type: pathspec
+DEFAULT_NO_LOG:
+ name: No log
+ default: False
+ description: "Toggle Ansible's display and logging of task details, mainly used to avoid security disclosures."
+ env: [{name: ANSIBLE_NO_LOG}]
+ ini:
+ - {key: no_log, section: defaults}
+ type: boolean
+DEFAULT_NO_TARGET_SYSLOG:
+ name: No syslog on target
+ default: False
+ description:
+ - Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts this will disable newer
+ style PowerShell modules from writing to the event log.
+ env: [{name: ANSIBLE_NO_TARGET_SYSLOG}]
+ ini:
+ - {key: no_target_syslog, section: defaults}
+ vars:
+ - name: ansible_no_target_syslog
+ version_added: '2.10'
+ type: boolean
+ yaml: {key: defaults.no_target_syslog}
+DEFAULT_NULL_REPRESENTATION:
+ name: Represent a null
+ default: ~
+ description: What templating should return as a 'null' value. When not set it will let Jinja2 decide.
+ env: [{name: ANSIBLE_NULL_REPRESENTATION}]
+ ini:
+ - {key: null_representation, section: defaults}
+ type: raw
+DEFAULT_POLL_INTERVAL:
+ name: Async poll interval
+ default: 15
+ description:
+ - For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling),
+ this is how often to check back on the status of those tasks when an explicit poll interval is not supplied.
+ The default is a reasonably moderate 15 seconds which is a tradeoff between checking in frequently and
+ providing a quick turnaround when something may have completed.
+ env: [{name: ANSIBLE_POLL_INTERVAL}]
+ ini:
+ - {key: poll_interval, section: defaults}
+ type: integer
+DEFAULT_PRIVATE_KEY_FILE:
+ name: Private key file
+ default: ~
+ description:
+ - For connections using a certificate or key file to authenticate, rather than an agent or passwords,
+ you can set the default value here to avoid re-specifying --private-key with every invocation.
+ env: [{name: ANSIBLE_PRIVATE_KEY_FILE}]
+ ini:
+ - {key: private_key_file, section: defaults}
+ type: path
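+# Illustrative ansible.cfg usage (the key path is hypothetical):
+#   [defaults]
+#   private_key_file = ~/.ssh/ansible_ed25519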
+DEFAULT_PRIVATE_ROLE_VARS:
+ name: Private role variables
+ default: False
+ description:
+ - Makes role variables inaccessible from other roles.
+ - This was introduced as a way to reset role variables to default values if
+ a role is used more than once in a playbook.
+ env: [{name: ANSIBLE_PRIVATE_ROLE_VARS}]
+ ini:
+ - {key: private_role_vars, section: defaults}
+ type: boolean
+ yaml: {key: defaults.private_role_vars}
+DEFAULT_REMOTE_PORT:
+ name: Remote port
+ default: ~
+ description: Port to use in remote connections; when blank it will use the connection plugin default.
+ env: [{name: ANSIBLE_REMOTE_PORT}]
+ ini:
+ - {key: remote_port, section: defaults}
+ type: integer
+ yaml: {key: defaults.remote_port}
+DEFAULT_REMOTE_USER:
+ name: Login/Remote User
+ description:
+ - Sets the login user for the target machines
+ - "When blank it uses the connection plugin's default, normally the user currently executing Ansible."
+ env: [{name: ANSIBLE_REMOTE_USER}]
+ ini:
+ - {key: remote_user, section: defaults}
+DEFAULT_ROLES_PATH:
+ name: Roles path
+ default: '{{ ANSIBLE_HOME ~ "/roles:/usr/share/ansible/roles:/etc/ansible/roles" }}'
+ description: Colon separated paths in which Ansible will search for Roles.
+ env: [{name: ANSIBLE_ROLES_PATH}]
+ expand_relative_paths: True
+ ini:
+ - {key: roles_path, section: defaults}
+ type: pathspec
+ yaml: {key: defaults.roles_path}
+DEFAULT_SELINUX_SPECIAL_FS:
+ name: Problematic file systems
+ default: fuse, nfs, vboxsf, ramfs, 9p, vfat
+ description:
+ - "Some filesystems do not support safe operations and/or return inconsistent errors,
+ this setting makes Ansible 'tolerate' those in the list w/o causing fatal errors."
+ - Data corruption may occur and writes are not always verified when a filesystem is in the list.
+ env:
+ - name: ANSIBLE_SELINUX_SPECIAL_FS
+ version_added: "2.9"
+ ini:
+ - {key: special_context_filesystems, section: selinux}
+ type: list
+DEFAULT_STDOUT_CALLBACK:
+ name: Main display callback plugin
+ default: default
+ description:
+ - "Set the main callback used to display Ansible output. You can only have one at a time."
+ - You can have many other callbacks, but just one can be in charge of stdout.
+ - See :ref:`callback_plugins` for a list of available options.
+ env: [{name: ANSIBLE_STDOUT_CALLBACK}]
+ ini:
+ - {key: stdout_callback, section: defaults}
+ENABLE_TASK_DEBUGGER:
+ name: Whether to enable the task debugger
+ default: False
+ description:
+ - Whether or not to enable the task debugger; this previously was done as a strategy plugin.
+ - Now all strategy plugins can inherit this behavior. The debugger defaults to activating when
+ a task fails or a host is unreachable. Use the debugger keyword for more flexibility.
+ type: boolean
+ env: [{name: ANSIBLE_ENABLE_TASK_DEBUGGER}]
+ ini:
+ - {key: enable_task_debugger, section: defaults}
+ version_added: "2.5"
+TASK_DEBUGGER_IGNORE_ERRORS:
+ name: Whether a failed task with ignore_errors=True will still invoke the debugger
+ default: True
+ description:
+ - This option defines whether the task debugger will be invoked on a failed task when ignore_errors=True
+ is specified.
+ - True specifies that the debugger will honor ignore_errors, False will not honor ignore_errors.
+ type: boolean
+ env: [{name: ANSIBLE_TASK_DEBUGGER_IGNORE_ERRORS}]
+ ini:
+ - {key: task_debugger_ignore_errors, section: defaults}
+ version_added: "2.7"
+DEFAULT_STRATEGY:
+ name: Implied strategy
+ default: 'linear'
+ description: Set the default strategy used for plays.
+ env: [{name: ANSIBLE_STRATEGY}]
+ ini:
+ - {key: strategy, section: defaults}
+ version_added: "2.3"
+DEFAULT_STRATEGY_PLUGIN_PATH:
+ name: Strategy Plugins Path
+ description: Colon separated paths in which Ansible will search for Strategy Plugins.
+ default: '{{ ANSIBLE_HOME ~ "/plugins/strategy:/usr/share/ansible/plugins/strategy" }}'
+ env: [{name: ANSIBLE_STRATEGY_PLUGINS}]
+ ini:
+ - {key: strategy_plugins, section: defaults}
+ type: pathspec
+DEFAULT_SU:
+ default: False
+ description: 'Toggle the use of "su" for tasks.'
+ env: [{name: ANSIBLE_SU}]
+ ini:
+ - {key: su, section: defaults}
+ type: boolean
+ yaml: {key: defaults.su}
+DEFAULT_SYSLOG_FACILITY:
+ name: syslog facility
+ default: LOG_USER
+ description: Syslog facility to use when Ansible logs to the remote target
+ env: [{name: ANSIBLE_SYSLOG_FACILITY}]
+ ini:
+ - {key: syslog_facility, section: defaults}
+DEFAULT_TERMINAL_PLUGIN_PATH:
+ name: Terminal Plugins Path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/terminal:/usr/share/ansible/plugins/terminal" }}'
+ description: Colon separated paths in which Ansible will search for Terminal Plugins.
+ env: [{name: ANSIBLE_TERMINAL_PLUGINS}]
+ ini:
+ - {key: terminal_plugins, section: defaults}
+ type: pathspec
+DEFAULT_TEST_PLUGIN_PATH:
+ name: Jinja2 Test Plugins Path
+ description: Colon separated paths in which Ansible will search for Jinja2 Test Plugins.
+ default: '{{ ANSIBLE_HOME ~ "/plugins/test:/usr/share/ansible/plugins/test" }}'
+ env: [{name: ANSIBLE_TEST_PLUGINS}]
+ ini:
+ - {key: test_plugins, section: defaults}
+ type: pathspec
+DEFAULT_TIMEOUT:
+ name: Connection timeout
+ default: 10
+ description: This is the default timeout for connection plugins to use.
+ env: [{name: ANSIBLE_TIMEOUT}]
+ ini:
+ - {key: timeout, section: defaults}
+ type: integer
+DEFAULT_TRANSPORT:
+ # note that ssh_utils refs this and needs to be updated if removed
+ name: Connection plugin
+ default: smart
+ description: "Default connection plugin to use, the 'smart' option will toggle between 'ssh' and 'paramiko' depending on controller OS and ssh versions"
+ env: [{name: ANSIBLE_TRANSPORT}]
+ ini:
+ - {key: transport, section: defaults}
+DEFAULT_UNDEFINED_VAR_BEHAVIOR:
+ name: Jinja2 fail on undefined
+ default: True
+ version_added: "1.3"
+ description:
+ - When True, this causes ansible templating to fail steps that reference variable names that are likely typoed.
+ - "Otherwise, any '{{ template_expression }}' that contains undefined variables will be rendered in a template or ansible action line exactly as written."
+ env: [{name: ANSIBLE_ERROR_ON_UNDEFINED_VARS}]
+ ini:
+ - {key: error_on_undefined_vars, section: defaults}
+ type: boolean
+DEFAULT_VARS_PLUGIN_PATH:
+ name: Vars Plugins Path
+ default: '{{ ANSIBLE_HOME ~ "/plugins/vars:/usr/share/ansible/plugins/vars" }}'
+ description: Colon separated paths in which Ansible will search for Vars Plugins.
+ env: [{name: ANSIBLE_VARS_PLUGINS}]
+ ini:
+ - {key: vars_plugins, section: defaults}
+ type: pathspec
+# TODO: unused?
+#DEFAULT_VAR_COMPRESSION_LEVEL:
+# default: 0
+# description: 'TODO: write it'
+# env: [{name: ANSIBLE_VAR_COMPRESSION_LEVEL}]
+# ini:
+# - {key: var_compression_level, section: defaults}
+# type: integer
+# yaml: {key: defaults.var_compression_level}
+DEFAULT_VAULT_ID_MATCH:
+ name: Force vault id match
+ default: False
+ description: 'If true, decrypting vaults with a vault id will only try the password from the matching vault-id'
+ env: [{name: ANSIBLE_VAULT_ID_MATCH}]
+ ini:
+ - {key: vault_id_match, section: defaults}
+ yaml: {key: defaults.vault_id_match}
+DEFAULT_VAULT_IDENTITY:
+ name: Vault id label
+ default: default
+ description: 'The label to use for the default vault id in cases where a vault id label is not provided.'
+ env: [{name: ANSIBLE_VAULT_IDENTITY}]
+ ini:
+ - {key: vault_identity, section: defaults}
+ yaml: {key: defaults.vault_identity}
+DEFAULT_VAULT_ENCRYPT_IDENTITY:
+ name: Vault id to use for encryption
+ description: 'The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The --encrypt-vault-id cli option overrides the configured value.'
+ env: [{name: ANSIBLE_VAULT_ENCRYPT_IDENTITY}]
+ ini:
+ - {key: vault_encrypt_identity, section: defaults}
+ yaml: {key: defaults.vault_encrypt_identity}
+DEFAULT_VAULT_IDENTITY_LIST:
+ name: Default vault ids
+ default: []
+ description: 'A list of vault-ids to use by default. Equivalent to multiple --vault-id args. Vault-ids are tried in order.'
+ env: [{name: ANSIBLE_VAULT_IDENTITY_LIST}]
+ ini:
+ - {key: vault_identity_list, section: defaults}
+ type: list
+ yaml: {key: defaults.vault_identity_list}
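+# Illustrative value (labels and paths are hypothetical); entries are tried in order:
+#   ANSIBLE_VAULT_IDENTITY_LIST='dev@~/.vault_pass_dev,prod@~/.vault_pass_prod'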
+DEFAULT_VAULT_PASSWORD_FILE:
+ name: Vault password file
+ default: ~
+ description:
+ - 'The vault password file to use. Equivalent to --vault-password-file or --vault-id'
+ - If executable, it will be run and the resulting stdout will be used as the password.
+ env: [{name: ANSIBLE_VAULT_PASSWORD_FILE}]
+ ini:
+ - {key: vault_password_file, section: defaults}
+ type: path
+ yaml: {key: defaults.vault_password_file}
+DEFAULT_VERBOSITY:
+ name: Verbosity
+ default: 0
+ description: Sets the default verbosity, equivalent to the number of ``-v`` passed in the command line.
+ env: [{name: ANSIBLE_VERBOSITY}]
+ ini:
+ - {key: verbosity, section: defaults}
+ type: integer
+DEPRECATION_WARNINGS:
+ name: Deprecation messages
+ default: True
+ description: "Toggle to control the showing of deprecation warnings"
+ env: [{name: ANSIBLE_DEPRECATION_WARNINGS}]
+ ini:
+ - {key: deprecation_warnings, section: defaults}
+ type: boolean
+DEVEL_WARNING:
+ name: Running devel warning
+ default: True
+ description: Toggle to control showing warnings related to running devel
+ env: [{name: ANSIBLE_DEVEL_WARNING}]
+ ini:
+ - {key: devel_warning, section: defaults}
+ type: boolean
+DIFF_ALWAYS:
+ name: Show differences
+ default: False
+ description: Configuration toggle to tell modules to show differences when in 'changed' status, equivalent to ``--diff``.
+ env: [{name: ANSIBLE_DIFF_ALWAYS}]
+ ini:
+ - {key: always, section: diff}
+ type: bool
+DIFF_CONTEXT:
+ name: Difference context
+ default: 3
+ description: How many lines of context to show when displaying the differences between files.
+ env: [{name: ANSIBLE_DIFF_CONTEXT}]
+ ini:
+ - {key: context, section: diff}
+ type: integer
+DISPLAY_ARGS_TO_STDOUT:
+ name: Show task arguments
+ default: False
+ description:
+ - "Normally ``ansible-playbook`` will print a header for each task that is run.
+ These headers will contain the name: field from the task if you specified one.
+ If you didn't then ``ansible-playbook`` uses the task's action to help you tell which task is presently running.
+ Sometimes you run many of the same action and so you want more information about the task to differentiate it from others of the same action.
+ If you set this variable to True in the config then ``ansible-playbook`` will also include the task's arguments in the header."
+ - "This setting defaults to False because there is a chance that you have sensitive values in your parameters and
+ you do not want those to be printed."
+ - "If you set this to True you should be sure that you have secured your environment's stdout
+ (no one can shoulder surf your screen and you aren't saving stdout to an insecure file) or
+ made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks which have sensitive values.
+ See 'How do I keep secret data in my playbook?' for more information."
+ env: [{name: ANSIBLE_DISPLAY_ARGS_TO_STDOUT}]
+ ini:
+ - {key: display_args_to_stdout, section: defaults}
+ type: boolean
+ version_added: "2.1"
+DISPLAY_SKIPPED_HOSTS:
+ name: Show skipped results
+ default: True
+ description: "Toggle to control displaying skipped task/host entries in a task in the default callback"
+ env:
+ - name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
+ ini:
+ - {key: display_skipped_hosts, section: defaults}
+ type: boolean
+DOCSITE_ROOT_URL:
+ name: Root docsite URL
+ default: https://docs.ansible.com/ansible-core/
+ description: Root docsite URL used to generate docs URLs in warning/error text;
+ must be an absolute URL with valid scheme and trailing slash.
+ ini:
+ - {key: docsite_root_url, section: defaults}
+ version_added: "2.8"
+DUPLICATE_YAML_DICT_KEY:
+ name: Controls ansible behaviour when finding duplicate keys in YAML.
+ default: warn
+ description:
+ - By default Ansible will issue a warning when a duplicate dict key is encountered in YAML.
+ - These warnings can be silenced by adjusting this setting to ``ignore``.
+ env: [{name: ANSIBLE_DUPLICATE_YAML_DICT_KEY}]
+ ini:
+ - {key: duplicate_dict_key, section: defaults}
+ type: string
+ choices: &basic_error
+ error: issue a 'fatal' error and stop the play
+ warn: issue a warning but continue
+ ignore: just continue silently
+ version_added: "2.9"
+ERROR_ON_MISSING_HANDLER:
+ name: Missing handler error
+ default: True
+ description: "Toggle to allow missing handlers to become a warning instead of an error when notifying."
+ env: [{name: ANSIBLE_ERROR_ON_MISSING_HANDLER}]
+ ini:
+ - {key: error_on_missing_handler, section: defaults}
+ type: boolean
+CONNECTION_FACTS_MODULES:
+ name: Map of connections to fact modules
+ default:
+ # use ansible.legacy names on unqualified facts modules to allow library/ overrides
+ asa: ansible.legacy.asa_facts
+ cisco.asa.asa: cisco.asa.asa_facts
+ eos: ansible.legacy.eos_facts
+ arista.eos.eos: arista.eos.eos_facts
+ frr: ansible.legacy.frr_facts
+ frr.frr.frr: frr.frr.frr_facts
+ ios: ansible.legacy.ios_facts
+ cisco.ios.ios: cisco.ios.ios_facts
+ iosxr: ansible.legacy.iosxr_facts
+ cisco.iosxr.iosxr: cisco.iosxr.iosxr_facts
+ junos: ansible.legacy.junos_facts
+ junipernetworks.junos.junos: junipernetworks.junos.junos_facts
+ nxos: ansible.legacy.nxos_facts
+ cisco.nxos.nxos: cisco.nxos.nxos_facts
+ vyos: ansible.legacy.vyos_facts
+ vyos.vyos.vyos: vyos.vyos.vyos_facts
+ exos: ansible.legacy.exos_facts
+ extreme.exos.exos: extreme.exos.exos_facts
+ slxos: ansible.legacy.slxos_facts
+ extreme.slxos.slxos: extreme.slxos.slxos_facts
+ voss: ansible.legacy.voss_facts
+ extreme.voss.voss: extreme.voss.voss_facts
+ ironware: ansible.legacy.ironware_facts
+ community.network.ironware: community.network.ironware_facts
+ description: "Which modules to run during a play's fact gathering stage based on connection"
+ type: dict
+FACTS_MODULES:
+ name: Gather Facts Modules
+ default:
+ - smart
+ description:
+ - "Which modules to run during a play's fact gathering stage, using the default of 'smart' will try to figure it out based on connection type."
+ - "If adding your own modules but you still want to use the default Ansible facts, you will want to include 'setup'
+ or corresponding network module to the list (if you add 'smart', Ansible will also figure it out)."
+ - "This does not affect explicit calls to the 'setup' module, but does always affect the 'gather_facts' action (implicit or explicit)."
+ env: [{name: ANSIBLE_FACTS_MODULES}]
+ ini:
+ - {key: facts_modules, section: defaults}
+ type: list
+ vars:
+ - name: ansible_facts_modules
+GALAXY_IGNORE_CERTS:
+ name: Galaxy validate certs
+ description:
+ - If set to yes, ansible-galaxy will not validate TLS certificates.
+ This can be useful for testing against a server with a self-signed certificate.
+ env: [{name: ANSIBLE_GALAXY_IGNORE}]
+ ini:
+ - {key: ignore_certs, section: galaxy}
+ type: boolean
+GALAXY_ROLE_SKELETON:
+ name: Galaxy role skeleton directory
+ description: Role skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy``/``ansible-galaxy role``, same as ``--role-skeleton``.
+ env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON}]
+ ini:
+ - {key: role_skeleton, section: galaxy}
+ type: path
+GALAXY_ROLE_SKELETON_IGNORE:
+ name: Galaxy role skeleton ignore
+ default: ["^.git$", "^.*/.git_keep$"]
+ description: patterns of files to ignore inside a Galaxy role or collection skeleton directory
+ env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE}]
+ ini:
+ - {key: role_skeleton_ignore, section: galaxy}
+ type: list
+GALAXY_COLLECTION_SKELETON:
+ name: Galaxy collection skeleton directory
+ description: Collection skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy collection``, same as ``--collection-skeleton``.
+ env: [{name: ANSIBLE_GALAXY_COLLECTION_SKELETON}]
+ ini:
+ - {key: collection_skeleton, section: galaxy}
+ type: path
+GALAXY_COLLECTION_SKELETON_IGNORE:
+ name: Galaxy collection skeleton ignore
+ default: ["^.git$", "^.*/.git_keep$"]
+ description: patterns of files to ignore inside a Galaxy collection skeleton directory
+ env: [{name: ANSIBLE_GALAXY_COLLECTION_SKELETON_IGNORE}]
+ ini:
+ - {key: collection_skeleton_ignore, section: galaxy}
+ type: list
+# TODO: unused?
+#GALAXY_SCMS:
+# name: Galaxy SCMS
+# default: git, hg
+# description: Available galaxy source control management systems.
+# env: [{name: ANSIBLE_GALAXY_SCMS}]
+# ini:
+# - {key: scms, section: galaxy}
+# type: list
+GALAXY_SERVER:
+ default: https://galaxy.ansible.com
+ description: "URL to prepend when roles don't specify the full URI, assume they are referencing this server as the source."
+ env: [{name: ANSIBLE_GALAXY_SERVER}]
+ ini:
+ - {key: server, section: galaxy}
+ yaml: {key: galaxy.server}
+GALAXY_SERVER_LIST:
+ description:
+ - A list of Galaxy servers to use when installing a collection.
+ - The value corresponds to the config ini header ``[galaxy_server.{{item}}]`` which defines the server details.
+ - 'See :ref:`galaxy_server_config` for more details on how to define a Galaxy server.'
+ - The order of servers in this list is used as the order in which a collection is resolved.
+ - Setting this config option will ignore the :ref:`galaxy_server` config option.
+ env: [{name: ANSIBLE_GALAXY_SERVER_LIST}]
+ ini:
+ - {key: server_list, section: galaxy}
+ type: list
+ version_added: "2.9"
+GALAXY_TOKEN_PATH:
+ default: '{{ ANSIBLE_HOME ~ "/galaxy_token" }}'
+ description: "Local path to galaxy access token file"
+ env: [{name: ANSIBLE_GALAXY_TOKEN_PATH}]
+ ini:
+ - {key: token_path, section: galaxy}
+ type: path
+ version_added: "2.9"
+GALAXY_DISPLAY_PROGRESS:
+ default: ~
+ description:
+ - Some steps in ``ansible-galaxy`` display a progress wheel which can cause issues on certain displays or when
+ outputting the stdout to a file.
+ - This config option controls whether the display wheel is shown or not.
+ - The default is to show the display wheel if stdout has a tty.
+ env: [{name: ANSIBLE_GALAXY_DISPLAY_PROGRESS}]
+ ini:
+ - {key: display_progress, section: galaxy}
+ type: bool
+ version_added: "2.10"
+GALAXY_CACHE_DIR:
+ default: '{{ ANSIBLE_HOME ~ "/galaxy_cache" }}'
+ description:
+ - The directory that stores cached responses from a Galaxy server.
+ - This is only used by the ``ansible-galaxy collection install`` and ``download`` commands.
+ - Cache files inside this dir will be ignored if they are world writable.
+ env:
+ - name: ANSIBLE_GALAXY_CACHE_DIR
+ ini:
+ - section: galaxy
+ key: cache_dir
+ type: path
+ version_added: '2.11'
+GALAXY_DISABLE_GPG_VERIFY:
+ default: false
+ type: bool
+ env:
+ - name: ANSIBLE_GALAXY_DISABLE_GPG_VERIFY
+ ini:
+ - section: galaxy
+ key: disable_gpg_verify
+ description:
+ - Disable GPG signature verification during collection installation.
+ version_added: '2.13'
+GALAXY_GPG_KEYRING:
+ type: path
+ env:
+ - name: ANSIBLE_GALAXY_GPG_KEYRING
+ ini:
+ - section: galaxy
+ key: gpg_keyring
+ description:
+ - Configure the keyring used for GPG signature verification during collection installation and verification.
+ version_added: '2.13'
+GALAXY_IGNORE_INVALID_SIGNATURE_STATUS_CODES:
+ type: list
+ env:
+ - name: ANSIBLE_GALAXY_IGNORE_SIGNATURE_STATUS_CODES
+ ini:
+ - section: galaxy
+ key: ignore_signature_status_codes
+ description:
+ - A list of GPG status codes to ignore during GPG signature verification.
+ See L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes) for status code descriptions.
+ - If fewer signatures successfully verify the collection than `GALAXY_REQUIRED_VALID_SIGNATURE_COUNT`,
+ signature verification will fail even if all error codes are ignored.
+ choices:
+ - EXPSIG
+ - EXPKEYSIG
+ - REVKEYSIG
+ - BADSIG
+ - ERRSIG
+ - NO_PUBKEY
+ - MISSING_PASSPHRASE
+ - BAD_PASSPHRASE
+ - NODATA
+ - UNEXPECTED
+ - ERROR
+ - FAILURE
+ - BADARMOR
+ - KEYEXPIRED
+ - KEYREVOKED
+ - NO_SECKEY
+GALAXY_REQUIRED_VALID_SIGNATURE_COUNT:
+ type: str
+ default: 1
+ env:
+ - name: ANSIBLE_GALAXY_REQUIRED_VALID_SIGNATURE_COUNT
+ ini:
+ - section: galaxy
+ key: required_valid_signature_count
+ description:
+ - The number of signatures that must be successful during GPG signature verification while installing or verifying collections.
+ - This should be a positive integer or ``all`` to indicate all signatures must successfully validate the collection.
+ - Prepend ``+`` to the value to fail if no valid signatures are found for the collection.
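+# Illustrative values: '2' requires at least two valid signatures, 'all' requires every
+# signature to validate, and '+1' additionally fails when no valid signatures are found.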
+HOST_KEY_CHECKING:
+ # note: constant not in use by ssh plugin anymore
+ # TODO: check non ssh connection plugins for use/migration
+ name: Check host keys
+ default: True
+ description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
+ env: [{name: ANSIBLE_HOST_KEY_CHECKING}]
+ ini:
+ - {key: host_key_checking, section: defaults}
+ type: boolean
+HOST_PATTERN_MISMATCH:
+ name: Control host pattern mismatch behaviour
+ default: 'warning'
+ description: This setting changes the behaviour of mismatched host patterns; it allows you to force a fatal error, a warning, or just ignore it.
+ env: [{name: ANSIBLE_HOST_PATTERN_MISMATCH}]
+ ini:
+ - {key: host_pattern_mismatch, section: inventory}
+ choices:
+ <<: *basic_error
+ version_added: "2.8"
+INTERPRETER_PYTHON:
+ name: Python interpreter path (or automatic discovery behavior) used for module execution
+ default: auto
+ env: [{name: ANSIBLE_PYTHON_INTERPRETER}]
+ ini:
+ - {key: interpreter_python, section: defaults}
+ vars:
+ - {name: ansible_python_interpreter}
+ version_added: "2.8"
+ description:
+ - Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode.
+ Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``.
+ All discovery modes employ a lookup table to use the included system Python (on distributions known to include one),
+ falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not
+ available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters
+ installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or
+ ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility
+ with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
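+# Illustrative override (host name and path are hypothetical): pin the interpreter per
+# host instead of relying on discovery, e.g. in an INI inventory:
+#   myhost ansible_python_interpreter=/usr/bin/python3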
+_INTERPRETER_PYTHON_DISTRO_MAP:
+ name: Mapping of known included platform pythons for various Linux distros
+ default:
+ redhat:
+ '6': /usr/bin/python
+ '8': /usr/libexec/platform-python
+ '9': /usr/bin/python3
+ debian:
+ '8': /usr/bin/python
+ '10': /usr/bin/python3
+ fedora:
+ '23': /usr/bin/python3
+ ubuntu:
+ '14': /usr/bin/python
+ '16': /usr/bin/python3
+ version_added: "2.8"
+ # FUTURE: add inventory override once we're sure it can't be abused by a rogue target
+ # FUTURE: add a platform layer to the map so we could use for, eg, freebsd/macos/etc?
+INTERPRETER_PYTHON_FALLBACK:
+ name: Ordered list of Python interpreters to check for in discovery
+ default:
+ - python3.11
+ - python3.10
+ - python3.9
+ - python3.8
+ - python3.7
+ - python3.6
+ - python3.5
+ - /usr/bin/python3
+ - /usr/libexec/platform-python
+ - python2.7
+ - /usr/bin/python
+ - python
+ vars:
+ - name: ansible_interpreter_python_fallback
+ type: list
+ version_added: "2.8"
+TRANSFORM_INVALID_GROUP_CHARS:
+ name: Transform invalid characters in group names
+ default: 'never'
+ description:
+ - Make ansible transform invalid characters in group names supplied by inventory sources.
+ env: [{name: ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS}]
+ ini:
+ - {key: force_valid_group_names, section: defaults}
+ type: string
+ choices:
+ always: it will replace any invalid characters with '_' (underscore) and warn the user
+ never: it will allow the group name but warn about the issue
+ ignore: it does the same as 'never', without issuing a warning
+ silently: it does the same as 'always', without issuing a warning
+ version_added: '2.8'
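+# Illustrative effect: with 'always', an inventory group named 'web-servers.prod' is
+# rewritten to 'web_servers_prod' (invalid characters replaced with '_') plus a warning;
+# with the default 'never', the name is kept and only a warning is issued.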
+INVALID_TASK_ATTRIBUTE_FAILED:
+ name: Controls whether invalid attributes for a task result in errors instead of warnings
+ default: True
+ description: If 'false', invalid attributes for a task will result in warnings instead of errors
+ type: boolean
+ env:
+ - name: ANSIBLE_INVALID_TASK_ATTRIBUTE_FAILED
+ ini:
+ - key: invalid_task_attribute_failed
+ section: defaults
+ version_added: "2.7"
+INVENTORY_ANY_UNPARSED_IS_FAILED:
+ name: Controls whether any unparseable inventory source is a fatal error
+ default: False
+ description: >
+ If 'true', it is a fatal error when any given inventory source
+ cannot be successfully parsed by any available inventory plugin;
+ otherwise, this situation only attracts a warning.
+ type: boolean
+ env: [{name: ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED}]
+ ini:
+ - {key: any_unparsed_is_failed, section: inventory}
+ version_added: "2.7"
+INVENTORY_CACHE_ENABLED:
+ name: Inventory caching enabled
+ default: False
+ description:
+ - Toggle to turn on inventory caching.
+ - This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
+ - The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory configuration.
+ - This message will be removed in 2.16.
+ env: [{name: ANSIBLE_INVENTORY_CACHE}]
+ ini:
+ - {key: cache, section: inventory}
+ type: bool
+INVENTORY_CACHE_PLUGIN:
+ name: Inventory cache plugin
+ description:
+ - The plugin for caching inventory.
+ - This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
+ - The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
+ - This message will be removed in 2.16.
+ env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN}]
+ ini:
+ - {key: cache_plugin, section: inventory}
+INVENTORY_CACHE_PLUGIN_CONNECTION:
+ name: Inventory cache plugin URI to override the defaults section
+ description:
+ - The inventory cache connection.
+ - This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
+ - The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
+ - This message will be removed in 2.16.
+ env: [{name: ANSIBLE_INVENTORY_CACHE_CONNECTION}]
+ ini:
+ - {key: cache_connection, section: inventory}
+INVENTORY_CACHE_PLUGIN_PREFIX:
+ name: Inventory cache plugin table prefix
+ description:
+ - The table prefix for the cache plugin.
+ - This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
+ - The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
+ - This message will be removed in 2.16.
+ env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN_PREFIX}]
+ default: ansible_inventory_
+ ini:
+ - {key: cache_prefix, section: inventory}
+INVENTORY_CACHE_TIMEOUT:
+ name: Inventory cache plugin expiration timeout
+ description:
+ - Expiration timeout for the inventory cache plugin data.
+ - This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
+ - The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
+ - This message will be removed in 2.16.
+ default: 3600
+ env: [{name: ANSIBLE_INVENTORY_CACHE_TIMEOUT}]
+ ini:
+ - {key: cache_timeout, section: inventory}
+INVENTORY_ENABLED:
+ name: Active Inventory plugins
+ default: ['host_list', 'script', 'auto', 'yaml', 'ini', 'toml']
+ description: List of enabled inventory plugins, it also determines the order in which they are used.
+ env: [{name: ANSIBLE_INVENTORY_ENABLED}]
+ ini:
+ - {key: enable_plugins, section: inventory}
+ type: list
+INVENTORY_EXPORT:
+ name: Set ansible-inventory into export mode
+ default: False
+ description: Controls if ansible-inventory will accurately reflect Ansible's view into inventory or a view optimized for exporting.
+ env: [{name: ANSIBLE_INVENTORY_EXPORT}]
+ ini:
+ - {key: export, section: inventory}
+ type: bool
+INVENTORY_IGNORE_EXTS:
+ name: Inventory ignore extensions
+ default: "{{(REJECT_EXTS + ('.orig', '.ini', '.cfg', '.retry'))}}"
+ description: List of extensions to ignore when using a directory as an inventory source
+ env: [{name: ANSIBLE_INVENTORY_IGNORE}]
+ ini:
+ - {key: inventory_ignore_extensions, section: defaults}
+ - {key: ignore_extensions, section: inventory}
+ type: list
+INVENTORY_IGNORE_PATTERNS:
+ name: Inventory ignore patterns
+ default: []
+ description: List of patterns to ignore when using a directory as an inventory source
+ env: [{name: ANSIBLE_INVENTORY_IGNORE_REGEX}]
+ ini:
+ - {key: inventory_ignore_patterns, section: defaults}
+ - {key: ignore_patterns, section: inventory}
+ type: list
+INVENTORY_UNPARSED_IS_FAILED:
+ name: Unparsed Inventory failure
+ default: False
+ description: >
+ If 'true' it is a fatal error if every single potential inventory
+ source fails to parse, otherwise this situation will only attract a
+ warning.
+ env: [{name: ANSIBLE_INVENTORY_UNPARSED_FAILED}]
+ ini:
+ - {key: unparsed_is_failed, section: inventory}
+ type: bool
+JINJA2_NATIVE_WARNING:
+ name: Running older than required Jinja version for jinja2_native warning
+ default: True
+ description: Toggle to control showing warnings related to running a Jinja version
+ older than required for jinja2_native
+ env:
+ - name: ANSIBLE_JINJA2_NATIVE_WARNING
+ deprecated:
+ why: This option is no longer used in the Ansible Core code base.
+ version: "2.17"
+ ini:
+ - {key: jinja2_native_warning, section: defaults}
+ type: boolean
+MAX_FILE_SIZE_FOR_DIFF:
+ name: Diff maximum file size
+ default: 104448
+ description: Maximum size of files to be considered for diff display
+ env: [{name: ANSIBLE_MAX_DIFF_SIZE}]
+ ini:
+ - {key: max_diff_size, section: defaults}
+ type: int
+NETWORK_GROUP_MODULES:
+ name: Network module families
+ default: [eos, nxos, ios, iosxr, junos, enos, ce, vyos, sros, dellos9, dellos10, dellos6, asa, aruba, aireos, bigip, ironware, onyx, netconf, exos, voss, slxos]
+ description: 'TODO: write it'
+ env:
+ - name: ANSIBLE_NETWORK_GROUP_MODULES
+ ini:
+ - {key: network_group_modules, section: defaults}
+ type: list
+ yaml: {key: defaults.network_group_modules}
+INJECT_FACTS_AS_VARS:
+ default: True
+ description:
+ - Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace.
+ - Unlike inside the `ansible_facts` dictionary, these will have an `ansible_` prefix.
+ env: [{name: ANSIBLE_INJECT_FACT_VARS}]
+ ini:
+ - {key: inject_facts_as_vars, section: defaults}
+ type: boolean
+ version_added: "2.5"
+MODULE_IGNORE_EXTS:
+ name: Module ignore extensions
+ default: "{{(REJECT_EXTS + ('.yaml', '.yml', '.ini'))}}"
+ description:
+ - List of extensions to ignore when looking for modules to load
+ - This is for rejecting script and binary module fallback extensions
+ env: [{name: ANSIBLE_MODULE_IGNORE_EXTS}]
+ ini:
+ - {key: module_ignore_exts, section: defaults}
+ type: list
+OLD_PLUGIN_CACHE_CLEARING:
+ description: Previously Ansible would only clear some of the plugin loading caches when loading new roles; this led to some behaviours in which a plugin loaded in previous plays would be unexpectedly 'sticky'. This setting allows you to return to that behaviour.
+ env: [{name: ANSIBLE_OLD_PLUGIN_CACHE_CLEAR}]
+ ini:
+ - {key: old_plugin_cache_clear, section: defaults}
+ type: boolean
+ default: False
+ version_added: "2.8"
+PARAMIKO_HOST_KEY_AUTO_ADD:
+ # TODO: move to plugin
+ default: False
+ description: 'TODO: write it'
+ env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}]
+ ini:
+ - {key: host_key_auto_add, section: paramiko_connection}
+ type: boolean
+PARAMIKO_LOOK_FOR_KEYS:
+ name: look for keys
+ default: True
+ description: 'TODO: write it'
+ env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}]
+ ini:
+ - {key: look_for_keys, section: paramiko_connection}
+ type: boolean
+PERSISTENT_CONTROL_PATH_DIR:
+ name: Persistence socket path
+ default: '{{ ANSIBLE_HOME ~ "/pc" }}'
+ description: Path to socket to be used by the connection persistence system.
+ env: [{name: ANSIBLE_PERSISTENT_CONTROL_PATH_DIR}]
+ ini:
+ - {key: control_path_dir, section: persistent_connection}
+ type: path
+PERSISTENT_CONNECT_TIMEOUT:
+ name: Persistence timeout
+ default: 30
+ description: This controls how long the persistent connection will remain idle before it is destroyed.
+ env: [{name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT}]
+ ini:
+ - {key: connect_timeout, section: persistent_connection}
+ type: integer
+PERSISTENT_CONNECT_RETRY_TIMEOUT:
+ name: Persistence connection retry timeout
+ default: 15
+ description: This controls the retry timeout for the persistent connection to connect to the local domain socket.
+ env: [{name: ANSIBLE_PERSISTENT_CONNECT_RETRY_TIMEOUT}]
+ ini:
+ - {key: connect_retry_timeout, section: persistent_connection}
+ type: integer
+PERSISTENT_COMMAND_TIMEOUT:
+ name: Persistence command timeout
+ default: 30
+ description: This controls the amount of time to wait for a response from the remote device before timing out the persistent connection.
+ env: [{name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT}]
+ ini:
+ - {key: command_timeout, section: persistent_connection}
+ type: int
+PLAYBOOK_DIR:
+ name: playbook dir override for non-playbook CLIs (a la --playbook-dir)
+ version_added: "2.9"
+ description:
+ - A number of non-playbook CLIs have a ``--playbook-dir`` argument; this sets the default value for it.
+ env: [{name: ANSIBLE_PLAYBOOK_DIR}]
+ ini: [{key: playbook_dir, section: defaults}]
+ type: path
+PLAYBOOK_VARS_ROOT:
+ name: playbook vars files root
+ default: top
+ version_added: "2.4.1"
+ description:
+ - This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars
+ env: [{name: ANSIBLE_PLAYBOOK_VARS_ROOT}]
+ ini:
+ - {key: playbook_vars_root, section: defaults}
+ choices:
+ top: follows the traditional behavior of using the top playbook in the chain to find the root directory.
+ bottom: follows the 2.4.0 behavior of using the current playbook to find the root directory.
+ all: examines from the first parent to the current playbook.
+PLUGIN_FILTERS_CFG:
+ name: Config file for limiting valid plugins
+ default: null
+ version_added: "2.5.0"
+ description:
+ - "A path to configuration for filtering which plugins installed on the system are allowed to be used."
+ - "See :ref:`plugin_filtering_config` for details of the filter file's format."
+ - " The default is /etc/ansible/plugin_filters.yml"
+ ini:
+ - key: plugin_filters_cfg
+ section: defaults
+ type: path
+PYTHON_MODULE_RLIMIT_NOFILE:
+ name: Adjust maximum file descriptor soft limit during Python module execution
+ description:
+ - Attempts to set RLIMIT_NOFILE soft limit to the specified value when executing Python modules (can speed up subprocess usage on
+ Python 2.x. See https://bugs.python.org/issue11284). The value will be limited by the existing hard limit. Default
+ value of 0 does not attempt to adjust existing system-defined limits.
+ default: 0
+ env:
+ - {name: ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE}
+ ini:
+ - {key: python_module_rlimit_nofile, section: defaults}
+ vars:
+ - {name: ansible_python_module_rlimit_nofile}
+ version_added: '2.8'
+RETRY_FILES_ENABLED:
+ name: Retry files
+ default: False
+ description: This controls whether a failed Ansible playbook should create a .retry file.
+ env: [{name: ANSIBLE_RETRY_FILES_ENABLED}]
+ ini:
+ - {key: retry_files_enabled, section: defaults}
+ type: bool
+RETRY_FILES_SAVE_PATH:
+ name: Retry files path
+ default: ~
+ description:
+ - This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled.
+ - This file will be overwritten after each run with the list of failed hosts from all plays.
+ env: [{name: ANSIBLE_RETRY_FILES_SAVE_PATH}]
+ ini:
+ - {key: retry_files_save_path, section: defaults}
+ type: path
+RUN_VARS_PLUGINS:
+ name: When should vars plugins run relative to inventory
+ default: demand
+ description:
+ - This setting can be used to optimize vars_plugin usage depending on the user's inventory size and play selection.
+ env: [{name: ANSIBLE_RUN_VARS_PLUGINS}]
+ ini:
+ - {key: run_vars_plugins, section: defaults}
+ type: str
+ choices:
+ demand: will run vars_plugins relative to inventory sources anytime vars are 'demanded' by tasks.
+ start: will run vars_plugins relative to inventory sources after importing that inventory source.
+ version_added: "2.10"
+SHOW_CUSTOM_STATS:
+ name: Display custom stats
+ default: False
+ description: 'This adds the custom stats set via the set_stats plugin to the default output'
+ env: [{name: ANSIBLE_SHOW_CUSTOM_STATS}]
+ ini:
+ - {key: show_custom_stats, section: defaults}
+ type: bool
+STRING_TYPE_FILTERS:
+ name: Filters to preserve strings
+ default: [string, to_json, to_nice_json, to_yaml, to_nice_yaml, ppretty, json]
+ description:
+ - "This list of filters avoids 'type conversion' when templating variables"
+ - Useful when you want to avoid conversion into lists or dictionaries for JSON strings, for example.
+ env: [{name: ANSIBLE_STRING_TYPE_FILTERS}]
+ ini:
+ - {key: dont_type_filters, section: jinja2}
+ type: list
+SYSTEM_WARNINGS:
+ name: System warnings
+ default: True
+ description:
+ - Allows disabling of warnings related to potential issues on the system running ansible itself (not on the managed hosts)
+ - These may include warnings about 3rd party packages or other conditions that should be resolved if possible.
+ env: [{name: ANSIBLE_SYSTEM_WARNINGS}]
+ ini:
+ - {key: system_warnings, section: defaults}
+ type: boolean
+TAGS_RUN:
+ name: Run Tags
+ default: []
+ type: list
+ description: Default list of tags to run in your plays; Skip Tags takes precedence.
+ env: [{name: ANSIBLE_RUN_TAGS}]
+ ini:
+ - {key: run, section: tags}
+ version_added: "2.5"
+TAGS_SKIP:
+ name: Skip Tags
+ default: []
+ type: list
+ description: Default list of tags to skip in your plays; takes precedence over Run Tags.
+ env: [{name: ANSIBLE_SKIP_TAGS}]
+ ini:
+ - {key: skip, section: tags}
+ version_added: "2.5"
+TASK_TIMEOUT:
+ name: Task Timeout
+ default: 0
+ description:
+ - Set the maximum time (in seconds) that a task can run for.
+ - If set to 0 (the default) there is no timeout.
+ env: [{name: ANSIBLE_TASK_TIMEOUT}]
+ ini:
+ - {key: task_timeout, section: defaults}
+ type: integer
+ version_added: '2.10'
+WORKER_SHUTDOWN_POLL_COUNT:
+ name: Worker Shutdown Poll Count
+ default: 0
+ description:
+ - The maximum number of times to check Task Queue Manager worker processes to verify they have exited cleanly.
+ - After this limit is reached any worker processes still running will be terminated.
+ - This is for internal use only.
+ env: [{name: ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT}]
+ type: integer
+ version_added: '2.10'
+WORKER_SHUTDOWN_POLL_DELAY:
+ name: Worker Shutdown Poll Delay
+ default: 0.1
+ description:
+ - The number of seconds to sleep between polling loops when checking Task Queue Manager worker processes to verify they have exited cleanly.
+ - This is for internal use only.
+ env: [{name: ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY}]
+ type: float
+ version_added: '2.10'
+USE_PERSISTENT_CONNECTIONS:
+ name: Persistence
+ default: False
+ description: Toggles the use of persistence for connections.
+ env: [{name: ANSIBLE_USE_PERSISTENT_CONNECTIONS}]
+ ini:
+ - {key: use_persistent_connections, section: defaults}
+ type: boolean
+VARIABLE_PLUGINS_ENABLED:
+ name: Vars plugin enabled list
+ default: ['host_group_vars']
+ description: Accept list for variable plugins that require it.
+ env: [{name: ANSIBLE_VARS_ENABLED}]
+ ini:
+ - {key: vars_plugins_enabled, section: defaults}
+ type: list
+ version_added: "2.10"
+VARIABLE_PRECEDENCE:
+ name: Group variable precedence
+ default: ['all_inventory', 'groups_inventory', 'all_plugins_inventory', 'all_plugins_play', 'groups_plugins_inventory', 'groups_plugins_play']
+ description: Allows changing the group variable precedence merge order.
+ env: [{name: ANSIBLE_PRECEDENCE}]
+ ini:
+ - {key: precedence, section: defaults}
+ type: list
+ version_added: "2.4"
+WIN_ASYNC_STARTUP_TIMEOUT:
+ name: Windows Async Startup Timeout
+ default: 5
+ description:
+ - For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling),
+ this is how long, in seconds, to wait for the task spawned by Ansible to connect back to the named pipe used
+ on Windows systems. The default is 5 seconds. This can be too low on slower systems, or systems under heavy load.
+ - This is not the total time an async command can run for, but is a separate timeout to wait for an async command to
+ start. The task will only start to be timed against its async_timeout once it has connected to the pipe, so the
+ overall maximum duration the task can take will be extended by the amount specified here.
+ env: [{name: ANSIBLE_WIN_ASYNC_STARTUP_TIMEOUT}]
+ ini:
+ - {key: win_async_startup_timeout, section: defaults}
+ type: integer
+ vars:
+ - {name: ansible_win_async_startup_timeout}
+ version_added: '2.10'
+YAML_FILENAME_EXTENSIONS:
+ name: Valid YAML extensions
+ default: [".yml", ".yaml", ".json"]
+ description:
+ - "Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these."
+ - 'This affects vars_files, include_vars, inventory and vars plugins among others.'
+ env:
+ - name: ANSIBLE_YAML_FILENAME_EXT
+ ini:
+ - section: defaults
+ key: yaml_valid_extensions
+ type: list
+NETCONF_SSH_CONFIG:
+ description: This variable is used to enable a bastion/jump host with a netconf connection. If set to True, the bastion/jump
+ host SSH settings should be present in the ~/.ssh/config file; alternatively, it can be set
+ to a custom SSH configuration file path from which to read the bastion/jump host settings.
+ env: [{name: ANSIBLE_NETCONF_SSH_CONFIG}]
+ ini:
+ - {key: ssh_config, section: netconf_connection}
+ yaml: {key: netconf_connection.ssh_config}
+ default: null
+STRING_CONVERSION_ACTION:
+ version_added: '2.8'
+ description:
+ - Action to take when a module parameter value is converted to a string (this does not affect variables).
+ For string parameters, values such as '1.00', "['a', 'b',]", and 'yes', 'y', etc.
+ will be converted by the YAML parser unless fully quoted.
+ - Valid options are 'error', 'warn', and 'ignore'.
+ - Since 2.8, this option defaults to 'warn' but will change to 'error' in 2.12.
+ default: 'warn'
+ env:
+ - name: ANSIBLE_STRING_CONVERSION_ACTION
+ ini:
+ - section: defaults
+ key: string_conversion_action
+ type: string
+VALIDATE_ACTION_GROUP_METADATA:
+ version_added: '2.12'
+ description:
+ - A toggle to disable validating a collection's 'metadata' entry for a module_defaults action group.
+ Metadata containing unexpected fields or value types will produce a warning when this is True.
+ default: True
+ env: [{name: ANSIBLE_VALIDATE_ACTION_GROUP_METADATA}]
+ ini:
+ - section: defaults
+ key: validate_action_group_metadata
+ type: bool
+VERBOSE_TO_STDERR:
+ version_added: '2.8'
+ description:
+ - Force 'verbose' option to use stderr instead of stdout
+ default: False
+ env:
+ - name: ANSIBLE_VERBOSE_TO_STDERR
+ ini:
+ - section: defaults
+ key: verbose_to_stderr
+ type: bool
+...
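
Each setting above declares its resolution channels (vars, env, ini) alongside a typed default; the ConfigManager in the next file resolves them, with variables and environment taking precedence over ini entries, and ini entries over the declared default. A minimal sketch of that precedence for one of the settings above, assuming an importable ansible installation and no conflicting ansible.cfg:

    import os

    # the env channel must be set before ansible.constants is imported,
    # because constants resolves every setting once at import time
    os.environ['ANSIBLE_RETRY_FILES_ENABLED'] = 'True'

    from ansible import constants as C

    # env beats the [defaults]retry_files_enabled ini key, which in turn
    # beats the declared default of False
    print(C.RETRY_FILES_ENABLED)  # -> True
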
diff --git a/lib/ansible/config/manager.py b/lib/ansible/config/manager.py
new file mode 100644
index 0000000..e1fde1d
--- /dev/null
+++ b/lib/ansible/config/manager.py
@@ -0,0 +1,607 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import atexit
+import configparser
+import os
+import os.path
+import sys
+import stat
+import tempfile
+import traceback
+
+from collections import namedtuple
+from collections.abc import Mapping, Sequence
+from jinja2.nativetypes import NativeEnvironment
+
+from ansible.errors import AnsibleOptionsError, AnsibleError
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.module_utils.common.yaml import yaml_load
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.parsing.quoting import unquote
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible.utils import py3compat
+from ansible.utils.path import cleanup_tmp_file, makedirs_safe, unfrackpath
+
+
+Plugin = namedtuple('Plugin', 'name type')
+Setting = namedtuple('Setting', 'name value origin type')
+
+INTERNAL_DEFS = {'lookup': ('_terms',)}
+
+
+def _get_entry(plugin_type, plugin_name, config):
+ ''' construct entry for requested config '''
+ entry = ''
+ if plugin_type:
+ entry += 'plugin_type: %s ' % plugin_type
+ if plugin_name:
+ entry += 'plugin: %s ' % plugin_name
+ entry += 'setting: %s ' % config
+ return entry
+
+
+# FIXME: see if we can unify in module_utils with similar function used by argspec
+def ensure_type(value, value_type, origin=None):
+ ''' return a configuration variable with casting
+ :arg value: The value to ensure correct typing of
+ :kwarg value_type: The type of the value. This can be any of the following strings:
+ :boolean: sets the value to a True or False value
+ :bool: Same as 'boolean'
+ :integer: Sets the value to an integer or raises a ValueError
+ :int: Same as 'integer'
+ :float: Sets the value to a float or raises a ValueError
+ :list: Treats the value as a comma separated list. Split the value
+ and return it as a python list.
+ :none: Sets the value to None
+ :path: Expands any environment variables and tildes in the value.
+ :tmppath: Create a unique temporary directory inside of the directory
+ specified by value and return its path.
+ :temppath: Same as 'tmppath'
+ :tmp: Same as 'tmppath'
+ :pathlist: Treat the value as a typical PATH string. (On POSIX, this
+ means colon separated strings.) Split the value and then expand
+ each part for environment variables and tildes.
+ :pathspec: Treat the value as a PATH string. Expands any environment variables
+ and tildes in the value.
+ :str: Sets the value to string types.
+ :string: Same as 'str'
+ '''
+
+ errmsg = ''
+ basedir = None
+ if origin and os.path.isabs(origin) and os.path.exists(to_bytes(origin)):
+ basedir = origin
+
+ if value_type:
+ value_type = value_type.lower()
+
+ if value is not None:
+ if value_type in ('boolean', 'bool'):
+ value = boolean(value, strict=False)
+
+ elif value_type in ('integer', 'int'):
+ value = int(value)
+
+ elif value_type == 'float':
+ value = float(value)
+
+ elif value_type == 'list':
+ if isinstance(value, string_types):
+ value = [unquote(x.strip()) for x in value.split(',')]
+ elif not isinstance(value, Sequence):
+ errmsg = 'list'
+
+ elif value_type == 'none':
+ if value == "None":
+ value = None
+
+ if value is not None:
+ errmsg = 'None'
+
+ elif value_type == 'path':
+ if isinstance(value, string_types):
+ value = resolve_path(value, basedir=basedir)
+ else:
+ errmsg = 'path'
+
+ elif value_type in ('tmp', 'temppath', 'tmppath'):
+ if isinstance(value, string_types):
+ value = resolve_path(value, basedir=basedir)
+ if not os.path.exists(value):
+ makedirs_safe(value, 0o700)
+ prefix = 'ansible-local-%s' % os.getpid()
+ value = tempfile.mkdtemp(prefix=prefix, dir=value)
+ atexit.register(cleanup_tmp_file, value, warn=True)
+ else:
+ errmsg = 'temppath'
+
+ elif value_type == 'pathspec':
+ if isinstance(value, string_types):
+ value = value.split(os.pathsep)
+
+ if isinstance(value, Sequence):
+ value = [resolve_path(x, basedir=basedir) for x in value]
+ else:
+ errmsg = 'pathspec'
+
+ elif value_type == 'pathlist':
+ if isinstance(value, string_types):
+ value = [x.strip() for x in value.split(',')]
+
+ if isinstance(value, Sequence):
+ value = [resolve_path(x, basedir=basedir) for x in value]
+ else:
+ errmsg = 'pathlist'
+
+ elif value_type in ('dict', 'dictionary'):
+ if not isinstance(value, Mapping):
+ errmsg = 'dictionary'
+
+ elif value_type in ('str', 'string'):
+ if isinstance(value, (string_types, AnsibleVaultEncryptedUnicode, bool, int, float, complex)):
+ value = unquote(to_text(value, errors='surrogate_or_strict'))
+ else:
+ errmsg = 'string'
+
+ # defaults to string type
+ elif isinstance(value, (string_types, AnsibleVaultEncryptedUnicode)):
+ value = unquote(to_text(value, errors='surrogate_or_strict'))
+
+ if errmsg:
+ raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
+
+ return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
+
+
+# FIXME: see if this can live in utils/path
+def resolve_path(path, basedir=None):
+ ''' resolve relative or 'variable' paths '''
+ if '{{CWD}}' in path: # allow users to force CWD using 'magic' {{CWD}}
+ path = path.replace('{{CWD}}', os.getcwd())
+
+ return unfrackpath(path, follow=False, basedir=basedir)
+
+
+# FIXME: generic file type?
+def get_config_type(cfile):
+
+ ftype = None
+ if cfile is not None:
+ ext = os.path.splitext(cfile)[-1]
+ if ext in ('.ini', '.cfg'):
+ ftype = 'ini'
+ elif ext in ('.yaml', '.yml'):
+ ftype = 'yaml'
+ else:
+ raise AnsibleOptionsError("Unsupported configuration file extension for %s: %s" % (cfile, to_native(ext)))
+
+ return ftype
+
+
+# FIXME: can move to module_utils for use for ini plugins also?
+def get_ini_config_value(p, entry):
+ ''' returns the value of the last ini entry found '''
+ value = None
+ if p is not None:
+ try:
+ value = p.get(entry.get('section', 'defaults'), entry.get('key', ''), raw=True)
+ except Exception: # FIXME: actually report issues here
+ pass
+ return value
+
+
+def find_ini_config_file(warnings=None):
+ ''' Load INI config file, in order (first found is used): ENV, CWD, HOME, /etc/ansible '''
+ # FIXME: eventually deprecate ini configs
+
+ if warnings is None:
+ # Note: In this case, warnings does nothing
+ warnings = set()
+
+ # A value that can never be a valid path so that we can tell if ANSIBLE_CONFIG was set later
+ # We can't use None because we could set path to None.
+ SENTINEL = object
+
+ potential_paths = []
+
+ # Environment setting
+ path_from_env = os.getenv("ANSIBLE_CONFIG", SENTINEL)
+ if path_from_env is not SENTINEL:
+ path_from_env = unfrackpath(path_from_env, follow=False)
+ if os.path.isdir(to_bytes(path_from_env)):
+ path_from_env = os.path.join(path_from_env, "ansible.cfg")
+ potential_paths.append(path_from_env)
+
+ # Current working directory
+ warn_cmd_public = False
+ try:
+ cwd = os.getcwd()
+ perms = os.stat(cwd)
+ cwd_cfg = os.path.join(cwd, "ansible.cfg")
+ if perms.st_mode & stat.S_IWOTH:
+ # Working directory is world writable so we'll skip it.
+ # Still have to look for a file here, though, so that we know if we have to warn
+ if os.path.exists(cwd_cfg):
+ warn_cmd_public = True
+ else:
+ potential_paths.append(to_text(cwd_cfg, errors='surrogate_or_strict'))
+ except OSError:
+ # If we can't access cwd, we'll simply skip it as a possible config source
+ pass
+
+ # Per user location
+ potential_paths.append(unfrackpath("~/.ansible.cfg", follow=False))
+
+ # System location
+ potential_paths.append("/etc/ansible/ansible.cfg")
+
+ for path in potential_paths:
+ b_path = to_bytes(path)
+ if os.path.exists(b_path) and os.access(b_path, os.R_OK):
+ break
+ else:
+ path = None
+
+ # Emit a warning if all the following are true:
+ # * We did not use a config from ANSIBLE_CONFIG
+ # * There's an ansible.cfg in the current working directory that we skipped
+ if path_from_env != path and warn_cmd_public:
+ warnings.add(u"Ansible is being run in a world writable directory (%s),"
+ u" ignoring it as an ansible.cfg source."
+ u" For more information see"
+ u" https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir"
+ % to_text(cwd))
+
+ return path
+
+
+def _add_base_defs_deprecations(base_defs):
+ '''Add deprecation source 'ansible.builtin' to deprecations in base.yml'''
+ def process(entry):
+ if 'deprecated' in entry:
+ entry['deprecated']['collection_name'] = 'ansible.builtin'
+
+ for dummy, data in base_defs.items():
+ process(data)
+ for section in ('ini', 'env', 'vars'):
+ if section in data:
+ for entry in data[section]:
+ process(entry)
+
+
+class ConfigManager(object):
+
+ DEPRECATED = [] # type: list[tuple[str, dict[str, str]]]
+ WARNINGS = set() # type: set[str]
+
+ def __init__(self, conf_file=None, defs_file=None):
+
+ self._base_defs = {}
+ self._plugins = {}
+ self._parsers = {}
+
+ self._config_file = conf_file
+
+ self._base_defs = self._read_config_yaml_file(defs_file or ('%s/base.yml' % os.path.dirname(__file__)))
+ _add_base_defs_deprecations(self._base_defs)
+
+ if self._config_file is None:
+ # set config using ini
+ self._config_file = find_ini_config_file(self.WARNINGS)
+
+ # consume configuration
+ if self._config_file:
+ # initialize parser and read config
+ self._parse_config_file()
+
+ # ensure we always have config def entry
+ self._base_defs['CONFIG_FILE'] = {'default': None, 'type': 'path'}
+
+ def _read_config_yaml_file(self, yml_file):
+ # TODO: handle relative paths as relative to the directory containing the current playbook instead of CWD
+ # Currently this is only used with absolute paths to the `ansible/config` directory
+ yml_file = to_bytes(yml_file)
+ if os.path.exists(yml_file):
+ with open(yml_file, 'rb') as config_def:
+ return yaml_load(config_def) or {}
+ raise AnsibleError(
+ "Missing base YAML definition file (bad install?): %s" % to_native(yml_file))
+
+ def _parse_config_file(self, cfile=None):
+ ''' return flat configuration settings from file(s) '''
+ # TODO: take list of files with merge/nomerge
+
+ if cfile is None:
+ cfile = self._config_file
+
+ ftype = get_config_type(cfile)
+ if cfile is not None:
+ if ftype == 'ini':
+ self._parsers[cfile] = configparser.ConfigParser(inline_comment_prefixes=(';',))
+ with open(to_bytes(cfile), 'rb') as f:
+ try:
+ cfg_text = to_text(f.read(), errors='surrogate_or_strict')
+ except UnicodeError as e:
+ raise AnsibleOptionsError("Error reading config file(%s) because the config file was not utf8 encoded: %s" % (cfile, to_native(e)))
+ try:
+ self._parsers[cfile].read_string(cfg_text)
+ except configparser.Error as e:
+ raise AnsibleOptionsError("Error reading config file (%s): %s" % (cfile, to_native(e)))
+ # FIXME: this should eventually handle yaml config files
+ # elif ftype == 'yaml':
+ # with open(cfile, 'rb') as config_stream:
+ # self._parsers[cfile] = yaml_load(config_stream)
+ else:
+ raise AnsibleOptionsError("Unsupported configuration file type: %s" % to_native(ftype))
+
+ def _find_yaml_config_files(self):
+ ''' Load YAML Config Files in order, check merge flags, keep origin of settings'''
+ pass
+
+ def get_plugin_options(self, plugin_type, name, keys=None, variables=None, direct=None):
+
+ options = {}
+ defs = self.get_configuration_definitions(plugin_type, name)
+ for option in defs:
+ options[option] = self.get_config_value(option, plugin_type=plugin_type, plugin_name=name, keys=keys, variables=variables, direct=direct)
+
+ return options
+
+ def get_plugin_vars(self, plugin_type, name):
+
+ pvars = []
+ for pdef in self.get_configuration_definitions(plugin_type, name).values():
+ if 'vars' in pdef and pdef['vars']:
+ for var_entry in pdef['vars']:
+ pvars.append(var_entry['name'])
+ return pvars
+
+ def get_plugin_options_from_var(self, plugin_type, name, variable):
+
+ options = []
+ for option_name, pdef in self.get_configuration_definitions(plugin_type, name).items():
+ if 'vars' in pdef and pdef['vars']:
+ for var_entry in pdef['vars']:
+ if variable == var_entry['name']:
+ options.append(option_name)
+ return options
+
+ def get_configuration_definition(self, name, plugin_type=None, plugin_name=None):
+
+ ret = {}
+ if plugin_type is None:
+ ret = self._base_defs.get(name, None)
+ elif plugin_name is None:
+ ret = self._plugins.get(plugin_type, {}).get(name, None)
+ else:
+ ret = self._plugins.get(plugin_type, {}).get(plugin_name, {}).get(name, None)
+
+ return ret
+
+ def has_configuration_definition(self, plugin_type, name):
+
+ has = False
+ if plugin_type in self._plugins:
+ has = (name in self._plugins[plugin_type])
+
+ return has
+
+ def get_configuration_definitions(self, plugin_type=None, name=None, ignore_private=False):
+ ''' just list the possible settings, either base or for a specific plugin type or plugin '''
+
+ ret = {}
+ if plugin_type is None:
+ ret = self._base_defs
+ elif name is None:
+ ret = self._plugins.get(plugin_type, {})
+ else:
+ ret = self._plugins.get(plugin_type, {}).get(name, {})
+
+ if ignore_private:
+ for cdef in list(ret.keys()):
+ if cdef.startswith('_'):
+ del ret[cdef]
+
+ return ret
+
+ def _loop_entries(self, container, entry_list):
+ ''' shared logic for resolving a value from an ordered list of entries '''
+
+ value = None
+ origin = None
+ for entry in entry_list:
+ name = entry.get('name')
+ try:
+ temp_value = container.get(name, None)
+ except UnicodeEncodeError:
+ self.WARNINGS.add(u'value for config entry {0} contains invalid characters, ignoring...'.format(to_text(name)))
+ continue
+ if temp_value is not None: # only set if entry is defined in container
+ # inline vault variables should be converted to a text string
+ if isinstance(temp_value, AnsibleVaultEncryptedUnicode):
+ temp_value = to_text(temp_value, errors='surrogate_or_strict')
+
+ value = temp_value
+ origin = name
+
+ # deal with deprecation of setting source, if used
+ if 'deprecated' in entry:
+ self.DEPRECATED.append((entry['name'], entry['deprecated']))
+
+ return value, origin
+
+ def get_config_value(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
+ ''' wrapper '''
+
+ try:
+ value, _drop = self.get_config_value_and_origin(config, cfile=cfile, plugin_type=plugin_type, plugin_name=plugin_name,
+ keys=keys, variables=variables, direct=direct)
+ except AnsibleError:
+ raise
+ except Exception as e:
+ raise AnsibleError("Unhandled exception when retrieving %s:\n%s" % (config, to_native(e)), orig_exc=e)
+ return value
+
+ def get_config_value_and_origin(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
+ ''' Given a config key, figure out the actual value and report the origin of the setting '''
+ if cfile is None:
+ # use default config
+ cfile = self._config_file
+
+ if config == 'CONFIG_FILE':
+ return cfile, ''
+
+ # Note: sources that are lists listed in low to high precedence (last one wins)
+ value = None
+ origin = None
+
+ defs = self.get_configuration_definitions(plugin_type, plugin_name)
+ if config in defs:
+
+ aliases = defs[config].get('aliases', [])
+
+ # direct setting via plugin arguments, can set to None so we bypass rest of processing/defaults
+ if direct:
+ if config in direct:
+ value = direct[config]
+ origin = 'Direct'
+ else:
+ direct_aliases = [direct[alias] for alias in aliases if alias in direct]
+ if direct_aliases:
+ value = direct_aliases[0]
+ origin = 'Direct'
+
+ if value is None and variables and defs[config].get('vars'):
+ # Use 'variable overrides' if present, highest precedence, but only present when querying running play
+ value, origin = self._loop_entries(variables, defs[config]['vars'])
+ origin = 'var: %s' % origin
+
+ # use playbook keywords if you have em
+ if value is None and defs[config].get('keyword') and keys:
+ value, origin = self._loop_entries(keys, defs[config]['keyword'])
+ origin = 'keyword: %s' % origin
+
+ # automap to keywords
+ # TODO: deprecate these in favor of explicit keyword above
+ if value is None and keys:
+ if config in keys:
+ value = keys[config]
+ keyword = config
+
+ elif aliases:
+ for alias in aliases:
+ if alias in keys:
+ value = keys[alias]
+ keyword = alias
+ break
+
+ if value is not None:
+ origin = 'keyword: %s' % keyword
+
+ if value is None and 'cli' in defs[config]:
+ # avoid circular import .. until valid
+ from ansible import context
+ value, origin = self._loop_entries(context.CLIARGS, defs[config]['cli'])
+ origin = 'cli: %s' % origin
+
+ # env vars are next precedence
+ if value is None and defs[config].get('env'):
+ value, origin = self._loop_entries(py3compat.environ, defs[config]['env'])
+ origin = 'env: %s' % origin
+
+ # try config file entries next, if we have one
+ if self._parsers.get(cfile, None) is None:
+ self._parse_config_file(cfile)
+
+ if value is None and cfile is not None:
+ ftype = get_config_type(cfile)
+ if ftype and defs[config].get(ftype):
+ if ftype == 'ini':
+ # load from ini config
+ try: # FIXME: generalize _loop_entries to allow for files also, most of this code is dupe
+ for ini_entry in defs[config]['ini']:
+ temp_value = get_ini_config_value(self._parsers[cfile], ini_entry)
+ if temp_value is not None:
+ value = temp_value
+ origin = cfile
+ if 'deprecated' in ini_entry:
+ self.DEPRECATED.append(('[%s]%s' % (ini_entry['section'], ini_entry['key']), ini_entry['deprecated']))
+ except Exception as e:
+ sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e)))
+ elif ftype == 'yaml':
+ # FIXME: implement, also , break down key from defs (. notation???)
+ origin = cfile
+
+ # set default if we got here w/o a value
+ if value is None:
+ if defs[config].get('required', False):
+ if not plugin_type or config not in INTERNAL_DEFS.get(plugin_type, {}):
+ raise AnsibleError("No setting was provided for required configuration %s" %
+ to_native(_get_entry(plugin_type, plugin_name, config)))
+ else:
+ origin = 'default'
+ value = defs[config].get('default')
+ if isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')) and variables is not None:
+ # template default values if possible
+ # NOTE: cannot use is_template due to circular dep
+ try:
+ t = NativeEnvironment().from_string(value)
+ value = t.render(variables)
+ except Exception:
+ pass # not templatable
+
+ # ensure correct type, can raise exceptions on mismatched types
+ try:
+ value = ensure_type(value, defs[config].get('type'), origin=origin)
+ except ValueError as e:
+ if origin.startswith('env:') and value == '':
+ # this is an empty env var for a non-string type, so fall back to the default
+ origin = 'default'
+ value = ensure_type(defs[config].get('default'), defs[config].get('type'), origin=origin)
+ else:
+ raise AnsibleOptionsError('Invalid type for configuration option %s (from %s): %s' %
+ (to_native(_get_entry(plugin_type, plugin_name, config)).strip(), origin, to_native(e)))
+
+ # deal with restricted values
+ if value is not None and 'choices' in defs[config] and defs[config]['choices'] is not None:
+ invalid_choices = True # assume the worst!
+ if defs[config].get('type') == 'list':
+ # for a list type, check that all values in the list are allowed
+ invalid_choices = not all(choice in defs[config]['choices'] for choice in value)
+ else:
+ # these should be only the simple data types (string, int, bool, float, etc) .. ignore dicts for now
+ invalid_choices = value not in defs[config]['choices']
+
+ if invalid_choices:
+
+ if isinstance(defs[config]['choices'], Mapping):
+ valid = ', '.join([to_text(k) for k in defs[config]['choices'].keys()])
+ elif isinstance(defs[config]['choices'], string_types):
+ valid = defs[config]['choices']
+ elif isinstance(defs[config]['choices'], Sequence):
+ valid = ', '.join([to_text(c) for c in defs[config]['choices']])
+ else:
+ valid = defs[config]['choices']
+
+ raise AnsibleOptionsError('Invalid value "%s" for configuration option "%s", valid values are: %s' %
+ (value, to_native(_get_entry(plugin_type, plugin_name, config)), valid))
+
+ # deal with deprecation of the setting
+ if 'deprecated' in defs[config] and origin != 'default':
+ self.DEPRECATED.append((config, defs[config].get('deprecated')))
+ else:
+ raise AnsibleError('Requested entry (%s) was not defined in configuration.' % to_native(_get_entry(plugin_type, plugin_name, config)))
+
+ return value, origin
+
+ def initialize_plugin_configuration_definitions(self, plugin_type, name, defs):
+
+ if plugin_type not in self._plugins:
+ self._plugins[plugin_type] = {}
+
+ self._plugins[plugin_type][name] = defs
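
As a quick orientation to the manager above, a hedged usage sketch; it assumes an importable ansible installation and queries TASK_TIMEOUT, a base setting defined earlier in this diff:

    from ansible.config.manager import ConfigManager, ensure_type

    mgr = ConfigManager()  # locates ansible.cfg via find_ini_config_file()

    # walks direct/vars/env/ini before falling back to the base.yml default
    value, origin = mgr.get_config_value_and_origin('TASK_TIMEOUT')
    print(value, origin)  # -> 0 default, when nothing overrides it

    # ensure_type() also works standalone for config-style casting
    print(ensure_type('a, b, c', 'list'))  # -> ['a', 'b', 'c']
    print(ensure_type('yes', 'bool'))      # -> True
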
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
new file mode 100644
index 0000000..23b1cf4
--- /dev/null
+++ b/lib/ansible/constants.py
@@ -0,0 +1,191 @@
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from string import ascii_letters, digits
+
+from ansible.config.manager import ConfigManager
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.collections import Sequence
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
+from ansible.release import __version__
+from ansible.utils.fqcn import add_internal_fqcns
+
+
+def _warning(msg):
+ ''' display is not guaranteed here, nor is it guaranteed to be the full class, but try anyway; fall back to sys.stderr.write '''
+ try:
+ from ansible.utils.display import Display
+ Display().warning(msg)
+ except Exception:
+ import sys
+ sys.stderr.write(' [WARNING] %s\n' % (msg))
+
+
+def _deprecated(msg, version):
+ ''' display is not guaranteed here, nor is it guaranteed to be the full class, but try anyway; fall back to sys.stderr.write '''
+ try:
+ from ansible.utils.display import Display
+ Display().deprecated(msg, version=version)
+ except Exception:
+ import sys
+ sys.stderr.write(' [DEPRECATED] %s, to be removed in %s\n' % (msg, version))
+
+
+def set_constant(name, value, export=vars()):
+ ''' sets a constant in the given namespace (this module's by default) '''
+ export[name] = value
+
+
+class _DeprecatedSequenceConstant(Sequence):
+ def __init__(self, value, msg, version):
+ self._value = value
+ self._msg = msg
+ self._version = version
+
+ def __len__(self):
+ _deprecated(self._msg, self._version)
+ return len(self._value)
+
+ def __getitem__(self, y):
+ _deprecated(self._msg, self._version)
+ return self._value[y]
+
+
+# CONSTANTS ### yes, actual ones
+
+# The following are hard-coded action names
+_ACTION_DEBUG = add_internal_fqcns(('debug', ))
+_ACTION_IMPORT_PLAYBOOK = add_internal_fqcns(('import_playbook', ))
+_ACTION_IMPORT_ROLE = add_internal_fqcns(('import_role', ))
+_ACTION_IMPORT_TASKS = add_internal_fqcns(('import_tasks', ))
+_ACTION_INCLUDE = add_internal_fqcns(('include', ))
+_ACTION_INCLUDE_ROLE = add_internal_fqcns(('include_role', ))
+_ACTION_INCLUDE_TASKS = add_internal_fqcns(('include_tasks', ))
+_ACTION_INCLUDE_VARS = add_internal_fqcns(('include_vars', ))
+_ACTION_INVENTORY_TASKS = add_internal_fqcns(('add_host', 'group_by'))
+_ACTION_META = add_internal_fqcns(('meta', ))
+_ACTION_SET_FACT = add_internal_fqcns(('set_fact', ))
+_ACTION_SETUP = add_internal_fqcns(('setup', ))
+_ACTION_HAS_CMD = add_internal_fqcns(('command', 'shell', 'script'))
+_ACTION_ALLOWS_RAW_ARGS = _ACTION_HAS_CMD + add_internal_fqcns(('raw', ))
+_ACTION_ALL_INCLUDES = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS + _ACTION_INCLUDE_ROLE
+_ACTION_ALL_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
+_ACTION_ALL_PROPER_INCLUDE_IMPORT_ROLES = _ACTION_INCLUDE_ROLE + _ACTION_IMPORT_ROLE
+_ACTION_ALL_PROPER_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
+_ACTION_ALL_INCLUDE_ROLE_TASKS = _ACTION_INCLUDE_ROLE + _ACTION_INCLUDE_TASKS
+_ACTION_ALL_INCLUDE_TASKS = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS
+_ACTION_FACT_GATHERING = _ACTION_SETUP + add_internal_fqcns(('gather_facts', ))
+_ACTION_WITH_CLEAN_FACTS = _ACTION_SET_FACT + _ACTION_INCLUDE_VARS
+
+# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
+COLOR_CODES = {
+ 'black': u'0;30', 'bright gray': u'0;37',
+ 'blue': u'0;34', 'white': u'1;37',
+ 'green': u'0;32', 'bright blue': u'1;34',
+ 'cyan': u'0;36', 'bright green': u'1;32',
+ 'red': u'0;31', 'bright cyan': u'1;36',
+ 'purple': u'0;35', 'bright red': u'1;31',
+ 'yellow': u'0;33', 'bright purple': u'1;35',
+ 'dark gray': u'1;30', 'bright yellow': u'1;33',
+ 'magenta': u'0;35', 'bright magenta': u'1;35',
+ 'normal': u'0',
+}
+REJECT_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt', '.rst')
+BOOL_TRUE = BOOLEANS_TRUE
+COLLECTION_PTYPE_COMPAT = {'module': 'modules'}
+
+PYTHON_DOC_EXTENSIONS = ('.py',)
+YAML_DOC_EXTENSIONS = ('.yml', '.yaml')
+DOC_EXTENSIONS = PYTHON_DOC_EXTENSIONS + YAML_DOC_EXTENSIONS
+
+DEFAULT_BECOME_PASS = None
+DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict') # characters included in auto-generated passwords
+DEFAULT_REMOTE_PASS = None
+DEFAULT_SUBSET = None
+# FIXME: expand to other plugins, but never doc fragments
+CONFIGURABLE_PLUGINS = ('become', 'cache', 'callback', 'cliconf', 'connection', 'httpapi', 'inventory', 'lookup', 'netconf', 'shell', 'vars')
+# NOTE: always update the docs/docsite/Makefile to match
+DOCUMENTABLE_PLUGINS = CONFIGURABLE_PLUGINS + ('module', 'strategy', 'test', 'filter')
+IGNORE_FILES = ("COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES", "MANIFEST", "Makefile") # ignore during module search
+INTERNAL_RESULT_KEYS = ('add_host', 'add_group')
+LOCALHOST = ('127.0.0.1', 'localhost', '::1')
+MODULE_REQUIRE_ARGS = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell',
+ 'ansible.windows.win_shell', 'raw', 'script')))
+MODULE_NO_JSON = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell',
+ 'ansible.windows.win_shell', 'raw')))
+RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python', 'ansible_facts')
+SYNTHETIC_COLLECTIONS = ('ansible.builtin', 'ansible.legacy')
+TREE_DIR = None
+VAULT_VERSION_MIN = 1.0
+VAULT_VERSION_MAX = 1.0
+
+# This matches a string that cannot be used as a valid python variable name, e.g. 'not-valid', 'not!valid@either', '1_nor_This'
+INVALID_VARIABLE_NAMES = re.compile(r'^[\d\W]|[^\w]')
+
+
+# FIXME: remove once play_context mangling is removed
+# the magic variable mapping dictionary below is used to translate
+# host/inventory variables to fields in the PlayContext
+# object. The dictionary values are tuples, to account for aliases
+# in variable names.
+
+COMMON_CONNECTION_VARS = frozenset(('ansible_connection', 'ansible_host', 'ansible_user', 'ansible_shell_executable',
+ 'ansible_port', 'ansible_pipelining', 'ansible_password', 'ansible_timeout',
+ 'ansible_shell_type', 'ansible_module_compression', 'ansible_private_key_file'))
+
+MAGIC_VARIABLE_MAPPING = dict(
+
+ # base
+ connection=('ansible_connection', ),
+ module_compression=('ansible_module_compression', ),
+ shell=('ansible_shell_type', ),
+ executable=('ansible_shell_executable', ),
+
+ # connection common
+ remote_addr=('ansible_ssh_host', 'ansible_host'),
+ remote_user=('ansible_ssh_user', 'ansible_user'),
+ password=('ansible_ssh_pass', 'ansible_password'),
+ port=('ansible_ssh_port', 'ansible_port'),
+ pipelining=('ansible_ssh_pipelining', 'ansible_pipelining'),
+ timeout=('ansible_ssh_timeout', 'ansible_timeout'),
+ private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),
+
+ # networking modules
+ network_os=('ansible_network_os', ),
+ connection_user=('ansible_connection_user',),
+
+ # ssh TODO: remove
+ ssh_executable=('ansible_ssh_executable', ),
+ ssh_common_args=('ansible_ssh_common_args', ),
+ sftp_extra_args=('ansible_sftp_extra_args', ),
+ scp_extra_args=('ansible_scp_extra_args', ),
+ ssh_extra_args=('ansible_ssh_extra_args', ),
+ ssh_transfer_method=('ansible_ssh_transfer_method', ),
+
+ # docker TODO: remove
+ docker_extra_args=('ansible_docker_extra_args', ),
+
+ # become
+ become=('ansible_become', ),
+ become_method=('ansible_become_method', ),
+ become_user=('ansible_become_user', ),
+ become_pass=('ansible_become_password', 'ansible_become_pass'),
+ become_exe=('ansible_become_exe', ),
+ become_flags=('ansible_become_flags', ),
+)
+
+# POPULATE SETTINGS FROM CONFIG ###
+config = ConfigManager()
+
+# Generate constants from config
+for setting in config.get_configuration_definitions():
+ set_constant(setting, config.get_config_value(setting, variables=vars()))
+
+for warn in config.WARNINGS:
+ _warning(warn)
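
A short, illustrative sketch of consuming these constants; the specific lookups are assumptions chosen for demonstration rather than code from this changeset:

    from ansible import constants as C

    print(C.COLOR_CODES['bright red'])   # -> 1;31, from the literal table above
    print('localhost' in C.LOCALHOST)    # -> True

    # the variable-name guard: '1_nor_This' starts with a digit, so it matches
    print(bool(C.INVALID_VARIABLE_NAMES.search('1_nor_This')))  # -> True

    # settings generated from base.yml by the loop at the bottom of this file
    print(C.TASK_TIMEOUT)                # -> 0 unless overridden
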
diff --git a/lib/ansible/context.py b/lib/ansible/context.py
new file mode 100644
index 0000000..216c135
--- /dev/null
+++ b/lib/ansible/context.py
@@ -0,0 +1,57 @@
+# Copyright: (c) 2018, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+Context of the running Ansible.
+
+In the future we *may* create Context objects to allow running multiple Ansible plays in parallel
+with different contexts but that is currently out of scope as the Ansible library is just for
+running the ansible command line tools.
+
+These APIs are still in flux so do not use them unless you are willing to update them with every Ansible release
+"""
+
+from collections.abc import Mapping, Set
+
+from ansible.module_utils.common.collections import is_sequence
+from ansible.utils.context_objects import CLIArgs, GlobalCLIArgs
+
+
+__all__ = ('CLIARGS',)
+
+# Note: this is not the singleton version. The Singleton is only created once the program has
+# actually parsed the args
+CLIARGS = CLIArgs({})
+
+
+# This should be called immediately after cli_args are processed (parsed, validated, and any
+# normalization performed on them). No other code should call it
+def _init_global_context(cli_args):
+ """Initialize the global context objects"""
+ global CLIARGS
+ CLIARGS = GlobalCLIArgs.from_options(cli_args)
+
+
+def cliargs_deferred_get(key, default=None, shallowcopy=False):
+ """Closure over getting a key from CLIARGS with shallow copy functionality
+
+ Primarily used in ``FieldAttribute`` where we need to defer setting the default
+ until after the CLI arguments have been parsed
+
+ This function is not directly bound to ``CliArgs`` so that it works with
+ ``CLIARGS`` being replaced
+ """
+ def inner():
+ value = CLIARGS.get(key, default=default)
+ if not shallowcopy:
+ return value
+ elif is_sequence(value):
+ return value[:]
+ elif isinstance(value, (Mapping, Set)):
+ return value.copy()
+ return value
+ return inner
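
A small usage sketch for cliargs_deferred_get(); the 'tags' key is an assumed example, and before _init_global_context() runs the placeholder CLIARGS is empty, so the default comes back:

    from ansible.context import cliargs_deferred_get

    # defer reading 'tags' until call time; shallowcopy protects callers
    # from mutating the shared CLIARGS value
    get_tags = cliargs_deferred_get('tags', default=[], shallowcopy=True)

    print(get_tags())  # -> [] until the CLI has parsed its arguments
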
diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py
new file mode 100644
index 0000000..a113225
--- /dev/null
+++ b/lib/ansible/errors/__init__.py
@@ -0,0 +1,373 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import traceback
+
+from collections.abc import Sequence
+
+from ansible.errors.yaml_strings import (
+ YAML_COMMON_DICT_ERROR,
+ YAML_COMMON_LEADING_TAB_ERROR,
+ YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR,
+ YAML_COMMON_UNBALANCED_QUOTES_ERROR,
+ YAML_COMMON_UNQUOTED_COLON_ERROR,
+ YAML_COMMON_UNQUOTED_VARIABLE_ERROR,
+ YAML_POSITION_DETAILS,
+ YAML_AND_SHORTHAND_ERROR,
+)
+from ansible.module_utils._text import to_native, to_text
+
+
+class AnsibleError(Exception):
+ '''
+ This is the base class for all errors raised from Ansible code,
+ and can be instantiated with two optional parameters beyond the
+ error message to control whether detailed information is displayed
+ when the error occurred while parsing a data file of some kind.
+
+ Usage:
+
+ raise AnsibleError('some message here', obj=obj, show_content=True)
+
+ Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
+ which should be returned by the DataLoader() class.
+ '''
+
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None):
+ super(AnsibleError, self).__init__(message)
+
+ self._show_content = show_content
+ self._suppress_extended_error = suppress_extended_error
+ self._message = to_native(message)
+ self.obj = obj
+ self.orig_exc = orig_exc
+
+ @property
+ def message(self):
+ # we import this here to prevent an import loop problem,
+ # since the objects code also imports ansible.errors
+ from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+
+ message = [self._message]
+ if isinstance(self.obj, AnsibleBaseYAMLObject):
+ extended_error = self._get_extended_error()
+ if extended_error and not self._suppress_extended_error:
+ message.append(
+ '\n\n%s' % to_native(extended_error)
+ )
+ elif self.orig_exc:
+ message.append('. %s' % to_native(self.orig_exc))
+
+ return ''.join(message)
+
+ @message.setter
+ def message(self, val):
+ self._message = val
+
+ def __str__(self):
+ return self.message
+
+ def __repr__(self):
+ return self.message
+
+ def _get_error_lines_from_file(self, file_name, line_number):
+ '''
+ Returns the line in the file which corresponds to the reported error
+ location, as well as the line preceding it (if the error did not
+ occur on the first line), to provide context to the error.
+ '''
+
+ target_line = ''
+ prev_line = ''
+
+ with open(file_name, 'r') as f:
+ lines = f.readlines()
+
+ # In case of a YAML loading error, PyYAML will report the very last line
+ # as the location of the error. Avoid an index error here in order to
+ # return a helpful message.
+ file_length = len(lines)
+ if line_number >= file_length:
+ line_number = file_length - 1
+
+ # If target_line contains only whitespace, move backwards until
+ # actual code is found. If there are several empty lines after target_line,
+ # the error lines would just be blank, which is not very helpful.
+ target_line = lines[line_number]
+ while not target_line.strip():
+ line_number -= 1
+ target_line = lines[line_number]
+
+ if line_number > 0:
+ prev_line = lines[line_number - 1]
+
+ return (target_line, prev_line)
+
+ def _get_extended_error(self):
+ '''
+ Given an object reporting the location of the exception in a file, return
+ detailed information regarding it including:
+
+ * the line which caused the error as well as the one preceding it
+ * causes and suggested remedies for common syntax errors
+
+ If this error was created with show_content=False, the reporting of content
+ is suppressed, as the file contents may be sensitive (i.e. vault data).
+ '''
+
+ error_message = ''
+
+ try:
+ (src_file, line_number, col_number) = self.obj.ansible_pos
+ error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
+ if src_file not in ('<string>', '<unicode>') and self._show_content:
+ (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
+ target_line = to_text(target_line)
+ prev_line = to_text(prev_line)
+ if target_line:
+ stripped_line = target_line.replace(" ", "")
+
+ # Check for k=v syntax in addition to YAML syntax and set the appropriate error position,
+ # arrow index
+ if re.search(r'\w+(\s+)?=(\s+)?[\w/-]+', prev_line):
+ error_position = prev_line.rstrip().find('=')
+ arrow_line = (" " * error_position) + "^ here"
+ error_message = YAML_POSITION_DETAILS % (src_file, line_number - 1, error_position + 1)
+ error_message += "\nThe offending line appears to be:\n\n%s\n%s\n\n" % (prev_line.rstrip(), arrow_line)
+ error_message += YAML_AND_SHORTHAND_ERROR
+ else:
+ arrow_line = (" " * (col_number - 1)) + "^ here"
+ error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)
+
+ # TODO: There may be cases where there is a valid tab in a line that has other errors.
+ if '\t' in target_line:
+ error_message += YAML_COMMON_LEADING_TAB_ERROR
+ # common error/remediation checking here:
+ # check for unquoted vars starting lines
+ if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line):
+ error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
+ # check for common dictionary mistakes
+ elif ":{{" in stripped_line and "}}" in stripped_line:
+ error_message += YAML_COMMON_DICT_ERROR
+ # check for common unquoted colon mistakes
+ elif (len(target_line) and
+ len(target_line) > 1 and
+ len(target_line) > col_number and
+ target_line[col_number] == ":" and
+ target_line.count(':') > 1):
+ error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
+ # otherwise, check for some common quoting mistakes
+ else:
+ # FIXME: This needs to split on the first ':' to account for modules like lineinfile
+ # that may have lines that contain legitimate colons, e.g., line: 'i ALL= (ALL) NOPASSWD: ALL'
+ # and throw off the quote matching logic.
+ parts = target_line.split(":")
+ if len(parts) > 1:
+ middle = parts[1].strip()
+ match = False
+ unbalanced = False
+
+ if middle.startswith("'") and not middle.endswith("'"):
+ match = True
+ elif middle.startswith('"') and not middle.endswith('"'):
+ match = True
+
+ if (len(middle) > 0 and
+ middle[0] in ['"', "'"] and
+ middle[-1] in ['"', "'"] and
+ target_line.count("'") > 2 or
+ target_line.count('"') > 2):
+ unbalanced = True
+
+ if match:
+ error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
+ if unbalanced:
+ error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR
+
+ except (IOError, TypeError):
+ error_message += '\n(could not open file to display line)'
+ except IndexError:
+ error_message += '\n(specified line no longer in file, maybe it changed?)'
+
+ return error_message
+
+
+class AnsibleAssertionError(AnsibleError, AssertionError):
+ '''Invalid assertion'''
+ pass
+
+
+class AnsibleOptionsError(AnsibleError):
+ ''' bad or incomplete options passed '''
+ pass
+
+
+class AnsibleParserError(AnsibleError):
+ ''' something was detected early that is wrong about a playbook or data file '''
+ pass
+
+
+class AnsibleInternalError(AnsibleError):
+ ''' internal safeguards tripped, something happened in the code that should never happen '''
+ pass
+
+
+class AnsibleRuntimeError(AnsibleError):
+ ''' ansible had a problem while running a playbook '''
+ pass
+
+
+class AnsibleModuleError(AnsibleRuntimeError):
+ ''' a module failed somehow '''
+ pass
+
+
+class AnsibleConnectionFailure(AnsibleRuntimeError):
+ ''' the transport / connection_plugin had a fatal error '''
+ pass
+
+
+class AnsibleAuthenticationFailure(AnsibleConnectionFailure):
+ '''invalid username/password/key'''
+ pass
+
+
+class AnsibleCallbackError(AnsibleRuntimeError):
+ ''' a callback failure '''
+ pass
+
+
+class AnsibleTemplateError(AnsibleRuntimeError):
+ '''A template related error'''
+ pass
+
+
+class AnsibleFilterError(AnsibleTemplateError):
+ ''' a templating failure '''
+ pass
+
+
+class AnsibleLookupError(AnsibleTemplateError):
+ ''' a lookup failure '''
+ pass
+
+
+class AnsibleUndefinedVariable(AnsibleTemplateError):
+ ''' a templating failure '''
+ pass
+
+
+class AnsibleFileNotFound(AnsibleRuntimeError):
+ ''' a file missing failure '''
+
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, paths=None, file_name=None):
+
+ self.file_name = file_name
+ self.paths = paths
+
+ if message:
+ message += "\n"
+ if self.file_name:
+ message += "Could not find or access '%s'" % to_text(self.file_name)
+ else:
+ message += "Could not find file"
+
+ if self.paths and isinstance(self.paths, Sequence):
+ searched = to_text('\n\t'.join(self.paths))
+ if message:
+ message += "\n"
+ message += "Searched in:\n\t%s" % searched
+
+ message += " on the Ansible Controller.\nIf you are using a module and expect the file to exist on the remote, see the remote_src option"
+
+ super(AnsibleFileNotFound, self).__init__(message=message, obj=obj, show_content=show_content,
+ suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
+
+
+# These Exceptions are temporary, using them as flow control until we can get a better solution.
+# DO NOT USE as they will probably be removed soon.
+# We will port the action modules in our tree to use a context manager instead.
+class AnsibleAction(AnsibleRuntimeError):
+ ''' Base Exception for Action plugin flow control '''
+
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
+
+ super(AnsibleAction, self).__init__(message=message, obj=obj, show_content=show_content,
+ suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
+ if result is None:
+ self.result = {}
+ else:
+ self.result = result
+
+
+class AnsibleActionSkip(AnsibleAction):
+ ''' an action runtime skip'''
+
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
+ super(AnsibleActionSkip, self).__init__(message=message, obj=obj, show_content=show_content,
+ suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
+ self.result.update({'skipped': True, 'msg': message})
+
+
+class AnsibleActionFail(AnsibleAction):
+ ''' an action runtime failure'''
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
+ super(AnsibleActionFail, self).__init__(message=message, obj=obj, show_content=show_content,
+ suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
+ self.result.update({'failed': True, 'msg': message, 'exception': traceback.format_exc()})
+
+
+class _AnsibleActionDone(AnsibleAction):
+ ''' an action runtime early exit'''
+ pass
+
+
+class AnsiblePluginError(AnsibleError):
+ ''' base class for Ansible plugin-related errors that do not need AnsibleError contextual data '''
+ def __init__(self, message=None, plugin_load_context=None):
+ super(AnsiblePluginError, self).__init__(message)
+ self.plugin_load_context = plugin_load_context
+
+
+class AnsiblePluginRemovedError(AnsiblePluginError):
+ ''' a requested plugin has been removed '''
+ pass
+
+
+class AnsiblePluginCircularRedirect(AnsiblePluginError):
+ '''a cycle was detected in plugin redirection'''
+ pass
+
+
+class AnsibleCollectionUnsupportedVersionError(AnsiblePluginError):
+ '''a collection is not supported by this version of Ansible'''
+ pass
+
+
+class AnsibleFilterTypeError(AnsibleTemplateError, TypeError):
+ ''' a Jinja filter templating failure due to bad type'''
+ pass
+
+
+class AnsiblePluginNotFound(AnsiblePluginError):
+ ''' Indicates we did not find an Ansible plugin '''
+ pass
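
To show how the message-building in these classes composes, a hedged example; the search paths and filename are invented for illustration:

    from ansible.errors import AnsibleError, AnsibleFileNotFound

    try:
        raise AnsibleFileNotFound(paths=['/tmp/roles', '/etc/ansible/roles'],
                                  file_name='missing_vars.yml')
    except AnsibleError as e:
        # the message property appends the "Searched in:" block
        # built in AnsibleFileNotFound.__init__
        print(e.message)
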
diff --git a/lib/ansible/errors/yaml_strings.py b/lib/ansible/errors/yaml_strings.py
new file mode 100644
index 0000000..e10a3f9
--- /dev/null
+++ b/lib/ansible/errors/yaml_strings.py
@@ -0,0 +1,140 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+__all__ = [
+ 'YAML_SYNTAX_ERROR',
+ 'YAML_POSITION_DETAILS',
+ 'YAML_COMMON_DICT_ERROR',
+ 'YAML_COMMON_UNQUOTED_VARIABLE_ERROR',
+ 'YAML_COMMON_UNQUOTED_COLON_ERROR',
+ 'YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR',
+ 'YAML_COMMON_UNBALANCED_QUOTES_ERROR',
+]
+
+YAML_SYNTAX_ERROR = """\
+Syntax Error while loading YAML.
+ %s"""
+
+YAML_POSITION_DETAILS = """\
+The error appears to be in '%s': line %s, column %s, but may
+be elsewhere in the file depending on the exact syntax problem.
+"""
+
+YAML_COMMON_DICT_ERROR = """\
+This one looks easy to fix. YAML thought it was looking for the start of a
+hash/dictionary and was confused to see a second "{". Most likely this was
+meant to be an ansible template evaluation, so we have to give the parser
+a small hint that we wanted a string instead. The solution here is to
+quote the entire value.
+
+For instance, if the original line was:
+
+ app_path: {{ base_path }}/foo
+
+It should be written as:
+
+ app_path: "{{ base_path }}/foo"
+"""
+
+YAML_COMMON_UNQUOTED_VARIABLE_ERROR = """\
+We could be wrong, but this one looks like it might be an issue with
+missing quotes. Always quote template expression brackets when they
+start a value. For instance:
+
+ with_items:
+ - {{ foo }}
+
+Should be written as:
+
+ with_items:
+ - "{{ foo }}"
+"""
+
+YAML_COMMON_UNQUOTED_COLON_ERROR = """\
+This one looks easy to fix. There seems to be an extra unquoted colon in the line
+and this is confusing the parser. It was only expecting to find one free
+colon. The solution is to add some quotes around the colon, or quote the
+entire line after the first colon.
+
+For instance, if the original line was:
+
+ copy: src=file.txt dest=/path/filename:with_colon.txt
+
+It can be written as:
+
+ copy: src=file.txt dest='/path/filename:with_colon.txt'
+
+Or:
+
+ copy: 'src=file.txt dest=/path/filename:with_colon.txt'
+"""
+
+YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR = """\
+This one looks easy to fix. It seems that a value starts with a quote,
+and the YAML parser is expecting to see the line end with the same kind
+of quote. For instance:
+
+ when: "ok" in result.stdout
+
+Could be written as:
+
+ when: '"ok" in result.stdout'
+
+Or equivalently:
+
+ when: "'ok' in result.stdout"
+"""
+
+YAML_COMMON_UNBALANCED_QUOTES_ERROR = """\
+We could be wrong, but this one looks like it might be an issue with
+unbalanced quotes. If starting a value with a quote, make sure the
+line ends with the same set of quotes. For instance this arbitrary
+example:
+
+ foo: "bad" "wolf"
+
+Could be written as:
+
+ foo: '"bad" "wolf"'
+"""
+
+YAML_COMMON_LEADING_TAB_ERROR = """\
+There appears to be a tab character at the start of the line.
+
+YAML does not use tabs for formatting. Tabs should be replaced with spaces.
+
+For example:
+ - name: update tooling
+ vars:
+ version: 1.2.3
+# ^--- there is a tab there.
+
+Should be written as:
+ - name: update tooling
+ vars:
+ version: 1.2.3
+# ^--- all spaces here.
+"""
+
+YAML_AND_SHORTHAND_ERROR = """\
+There appears to be both 'k=v' shorthand syntax and YAML in this task. \
+Only one syntax may be used.
+"""
diff --git a/lib/ansible/executor/__init__.py b/lib/ansible/executor/__init__.py
new file mode 100644
index 0000000..ae8ccff
--- /dev/null
+++ b/lib/ansible/executor/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/executor/action_write_locks.py b/lib/ansible/executor/action_write_locks.py
new file mode 100644
index 0000000..fd82744
--- /dev/null
+++ b/lib/ansible/executor/action_write_locks.py
@@ -0,0 +1,46 @@
+# (c) 2016 - Red Hat, Inc. <info@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import multiprocessing.synchronize
+
+from multiprocessing import Lock
+
+from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
+
+if 'action_write_locks' not in globals():
+ # Do not initialize this more than once because doing so seems to clobber
+ # the existing one. multiprocessing must be reloading the module
+ # when it forks?
+ action_write_locks = dict() # type: dict[str | None, multiprocessing.synchronize.Lock]
+
+ # Below is a Lock for use when we weren't expecting a named module. It gets used when an action
+ # plugin invokes a module whose name does not match the action's name. This is slightly less
+ # efficient, as all processes with unexpected module names will wait on this one lock.
+ action_write_locks[None] = Lock()
+
+ # These plugins are known to be called directly by action plugins with names differing from the
+ # action plugin name. We precreate them here as an optimization.
+ # If a list of service managers is created in the future we can do the same for them.
+ mods = set(p['name'] for p in PKG_MGRS)
+
+ mods.update(('copy', 'file', 'setup', 'slurp', 'stat'))
+ for mod_name in mods:
+ action_write_locks[mod_name] = Lock()
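+
+# Illustrative usage (assumed; the real callers live in module_common.py): a
+# worker grabs the module's lock, falling back to the shared None lock, before
+# writing a cached payload:
+#
+# lock = action_write_locks.get(module_name, action_write_locks[None])
+# with lock:
+#     ...  # only one forked worker writes this module's payload at a time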
diff --git a/lib/ansible/executor/discovery/__init__.py b/lib/ansible/executor/discovery/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/executor/discovery/__init__.py
diff --git a/lib/ansible/executor/discovery/python_target.py b/lib/ansible/executor/discovery/python_target.py
new file mode 100644
index 0000000..7137733
--- /dev/null
+++ b/lib/ansible/executor/discovery/python_target.py
@@ -0,0 +1,48 @@
+# Copyright: (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# FUTURE: this could be swapped out for our bundled version of distro to move more complete platform
+# logic to the targets, so long as we maintain Py2.6 compat and don't need to do any kind of script assembly
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import platform
+import io
+import os
+
+
+def read_utf8_file(path, encoding='utf-8'):
+ if not os.access(path, os.R_OK):
+ return None
+ with io.open(path, 'r', encoding=encoding) as fd:
+ content = fd.read()
+
+ return content
+
+
+def get_platform_info():
+ result = dict(platform_dist_result=[])
+
+ if hasattr(platform, 'dist'):
+ result['platform_dist_result'] = platform.dist()
+
+ osrelease_content = read_utf8_file('/etc/os-release')
+ # try to fall back to /usr/lib/os-release
+ if not osrelease_content:
+ osrelease_content = read_utf8_file('/usr/lib/os-release')
+
+ result['osrelease_content'] = osrelease_content
+
+ return result
+
+
+def main():
+ info = get_platform_info()
+
+ print(json.dumps(info))
+
+
+if __name__ == '__main__':
+ main()
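+
+# Example output (illustrative; values vary by host):
+# {"platform_dist_result": [], "osrelease_content": "NAME=\"Ubuntu\"\n..."}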
diff --git a/lib/ansible/executor/interpreter_discovery.py b/lib/ansible/executor/interpreter_discovery.py
new file mode 100644
index 0000000..bfd8504
--- /dev/null
+++ b/lib/ansible/executor/interpreter_discovery.py
@@ -0,0 +1,207 @@
+# Copyright: (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import bisect
+import json
+import pkgutil
+import re
+
+from ansible import constants as C
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.distro import LinuxDistribution
+from ansible.utils.display import Display
+from ansible.utils.plugin_docs import get_versioned_doclink
+from ansible.module_utils.compat.version import LooseVersion
+from ansible.module_utils.facts.system.distribution import Distribution
+from traceback import format_exc
+
+OS_FAMILY_LOWER = {k.lower(): v.lower() for k, v in Distribution.OS_FAMILY.items()}
+
+display = Display()
+foundre = re.compile(r'(?s)PLATFORM[\r\n]+(.*)FOUND(.*)ENDFOUND')
+
+
+class InterpreterDiscoveryRequiredError(Exception):
+ def __init__(self, message, interpreter_name, discovery_mode):
+ super(InterpreterDiscoveryRequiredError, self).__init__(message)
+ self.interpreter_name = interpreter_name
+ self.discovery_mode = discovery_mode
+
+ def __str__(self):
+ return self.message
+
+ def __repr__(self):
+ # TODO: proper repr impl
+ return self.message
+
+
+def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
+ # interpreter discovery is a 2-step process with the target. First, we use a simple shell-agnostic bootstrap to
+ # get the system type from uname, and find any random Python that can get us the info we need. For supported
+ # target OS types, we'll dispatch a Python script that calls platform.dist() (for older platforms, where available)
+ # and brings back /etc/os-release (if present). The proper Python path is looked up in a table of known
+ # distros/versions with included Pythons; if nothing is found, depending on the discovery mode, either the
+ # default fallback of /usr/bin/python is used (if we know it's there), or discovery fails.
+
+ # FUTURE: add logical equivalence for "python3" in the case of py3-only modules?
+ if interpreter_name != 'python':
+ raise ValueError('Interpreter discovery not supported for {0}'.format(interpreter_name))
+
+ host = task_vars.get('inventory_hostname', 'unknown')
+ res = None
+ platform_type = 'unknown'
+ found_interpreters = [u'/usr/bin/python'] # fallback value
+ is_auto_legacy = discovery_mode.startswith('auto_legacy')
+ is_silent = discovery_mode.endswith('_silent')
+
+ try:
+ platform_python_map = C.config.get_config_value('_INTERPRETER_PYTHON_DISTRO_MAP', variables=task_vars)
+ bootstrap_python_list = C.config.get_config_value('INTERPRETER_PYTHON_FALLBACK', variables=task_vars)
+
+ display.vvv(msg=u"Attempting {0} interpreter discovery".format(interpreter_name), host=host)
+
+ # not all command -v impls accept a list of commands, so we have to call it once per python
+ command_list = ["command -v '%s'" % py for py in bootstrap_python_list]
+ shell_bootstrap = "echo PLATFORM; uname; echo FOUND; {0}; echo ENDFOUND".format('; '.join(command_list))
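+ # The bootstrap's remote output, which 'foundre' above parses, looks like
+ # (illustrative):
+ # PLATFORM
+ # Linux
+ # FOUND
+ # /usr/bin/python3
+ # /usr/bin/python
+ # ENDFOUND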
+
+ # FUTURE: in most cases we probably don't want to use become, but maybe sometimes we do?
+ res = action._low_level_execute_command(shell_bootstrap, sudoable=False)
+
+ raw_stdout = res.get('stdout', u'')
+
+ match = foundre.match(raw_stdout)
+
+ if not match:
+ display.debug(u'raw interpreter discovery output: {0}'.format(raw_stdout), host=host)
+ raise ValueError('unexpected output from Python interpreter discovery')
+
+ platform_type = match.groups()[0].lower().strip()
+
+ found_interpreters = [interp.strip() for interp in match.groups()[1].splitlines() if interp.startswith('/')]
+
+ display.debug(u"found interpreters: {0}".format(found_interpreters), host=host)
+
+ if not found_interpreters:
+ if not is_silent:
+ action._discovery_warnings.append(u'No python interpreters found for '
+ u'host {0} (tried {1})'.format(host, bootstrap_python_list))
+ # this is lame, but returning None or throwing an exception is uglier
+ return u'/usr/bin/python'
+
+ if platform_type != 'linux':
+ raise NotImplementedError('unsupported platform for extended discovery: {0}'.format(to_native(platform_type)))
+
+ platform_script = pkgutil.get_data('ansible.executor.discovery', 'python_target.py')
+
+ # FUTURE: respect pipelining setting instead of just if the connection supports it?
+ if action._connection.has_pipelining:
+ res = action._low_level_execute_command(found_interpreters[0], sudoable=False, in_data=platform_script)
+ else:
+ # FUTURE: implement on-disk case (via script action or ?)
+ raise NotImplementedError('pipelining support required for extended interpreter discovery')
+
+ platform_info = json.loads(res.get('stdout'))
+
+ distro, version = _get_linux_distro(platform_info)
+
+ if not distro or not version:
+ raise NotImplementedError('unable to get Linux distribution/version info')
+
+ family = OS_FAMILY_LOWER.get(distro.lower().strip())
+
+ version_map = platform_python_map.get(distro.lower().strip()) or platform_python_map.get(family)
+ if not version_map:
+ raise NotImplementedError('unsupported Linux distribution: {0}'.format(distro))
+
+ platform_interpreter = to_text(_version_fuzzy_match(version, version_map), errors='surrogate_or_strict')
+
+ # provide a transition period for hosts that were using /usr/bin/python previously (but shouldn't have been)
+ if is_auto_legacy:
+ if platform_interpreter != u'/usr/bin/python' and u'/usr/bin/python' in found_interpreters:
+ if not is_silent:
+ action._discovery_warnings.append(
+ u"Distribution {0} {1} on host {2} should use {3}, but is using "
+ u"/usr/bin/python for backward compatibility with prior Ansible releases. "
+ u"See {4} for more information"
+ .format(distro, version, host, platform_interpreter,
+ get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
+ return u'/usr/bin/python'
+
+ if platform_interpreter not in found_interpreters:
+ if platform_interpreter not in bootstrap_python_list:
+ # sanity check to make sure we looked for it
+ if not is_silent:
+ action._discovery_warnings \
+ .append(u"Platform interpreter {0} on host {1} is missing from bootstrap list"
+ .format(platform_interpreter, host))
+
+ if not is_silent:
+ action._discovery_warnings \
+ .append(u"Distribution {0} {1} on host {2} should use {3}, but is using {4}, since the "
+ u"discovered platform python interpreter was not present. See {5} "
+ u"for more information."
+ .format(distro, version, host, platform_interpreter, found_interpreters[0],
+ get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
+ return found_interpreters[0]
+
+ return platform_interpreter
+ except NotImplementedError as ex:
+ display.vvv(msg=u'Python interpreter discovery fallback ({0})'.format(to_text(ex)), host=host)
+ except Exception as ex:
+ if not is_silent:
+ display.warning(msg=u'Unhandled error in Python interpreter discovery for host {0}: {1}'.format(host, to_text(ex)))
+ display.debug(msg=u'Interpreter discovery traceback:\n{0}'.format(to_text(format_exc())), host=host)
+ if res and res.get('stderr'):
+ display.vvv(msg=u'Interpreter discovery remote stderr:\n{0}'.format(to_text(res.get('stderr'))), host=host)
+
+ if not is_silent:
+ action._discovery_warnings \
+ .append(u"Platform {0} on host {1} is using the discovered Python interpreter at {2}, but future installation of "
+ u"another Python interpreter could change the meaning of that path. See {3} "
+ u"for more information."
+ .format(platform_type, host, found_interpreters[0],
+ get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
+ return found_interpreters[0]
+
+
+def _get_linux_distro(platform_info):
+ dist_result = platform_info.get('platform_dist_result', [])
+
+ if len(dist_result) == 3 and any(dist_result):
+ return dist_result[0], dist_result[1]
+
+ osrelease_content = platform_info.get('osrelease_content')
+
+ if not osrelease_content:
+ return u'', u''
+
+ osr = LinuxDistribution._parse_os_release_content(osrelease_content)
+
+ return osr.get('id', u''), osr.get('version_id', u'')
+
+
+def _version_fuzzy_match(version, version_map):
+ # try exact match first
+ res = version_map.get(version)
+ if res:
+ return res
+
+ sorted_looseversions = sorted([LooseVersion(v) for v in version_map.keys()])
+
+ find_looseversion = LooseVersion(version)
+
+ # slot match; return nearest previous version we're newer than
+ kpos = bisect.bisect(sorted_looseversions, find_looseversion)
+
+ if kpos == 0:
+ # older than everything in the list, return the oldest version
+ # TODO: warning-worthy?
+ return version_map.get(sorted_looseversions[0].vstring)
+
+ # TODO: is "past the end of the list" warning-worthy too (at least if it's not a major version match)?
+
+ # return the next-oldest entry that we're newer than...
+ return version_map.get(sorted_looseversions[kpos - 1].vstring)
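+
+# Illustrative example (assumed map): given version_map
+# {'6': '/usr/bin/python', '8': '/usr/libexec/platform-python'},
+# _version_fuzzy_match('7.9', version_map) returns '/usr/bin/python' (the
+# nearest entry '7.9' is newer than), and '5' falls back to the oldest entry.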
diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py
new file mode 100644
index 0000000..4d06acb
--- /dev/null
+++ b/lib/ansible/executor/module_common.py
@@ -0,0 +1,1428 @@
+# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import base64
+import datetime
+import json
+import os
+import shlex
+import zipfile
+import re
+import pkgutil
+
+from ast import AST, Import, ImportFrom
+from io import BytesIO
+
+from ansible.release import __version__, __author__
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
+from ansible.executor.powershell import module_manifest as ps_manifest
+from ansible.module_utils.common.json import AnsibleJSONEncoder
+from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
+from ansible.plugins.loader import module_utils_loader
+from ansible.utils.collection_loader._collection_finder import _get_collection_metadata, _nested_dict_get
+
+# Must import the action_write_locks module and use write_locks through it.
+# If we import write_locks directly then we end up binding a
+# variable to the object and then it never gets updated.
+from ansible.executor import action_write_locks
+
+from ansible.utils.display import Display
+from collections import namedtuple
+
+import importlib.util
+import importlib.machinery
+
+display = Display()
+
+ModuleUtilsProcessEntry = namedtuple('ModuleUtilsProcessEntry', ['name_parts', 'is_ambiguous', 'has_redirected_child', 'is_optional'])
+
+REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
+REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
+REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
+REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
+REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
+REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
+
+# We could end up writing out parameters with unicode characters so we need to
+# specify an encoding for the python source file
+ENCODING_STRING = u'# -*- coding: utf-8 -*-'
+b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
+
+# module_common is relative to module_utils, so fix the path
+_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
+
+# ******************************************************************************
+
+ANSIBALLZ_TEMPLATE = u'''%(shebang)s
+%(coding)s
+_ANSIBALLZ_WRAPPER = True # For test-module.py script to tell this is a ANSIBALLZ_WRAPPER
+# This code is part of Ansible, but is an independent component.
+# The code in this particular templatable string, and this templatable string
+# only, is BSD licensed. Modules which end up using this snippet, which is
+# dynamically combined together by Ansible still belong to the author of the
+# module, and they may assign their own license to the complete work.
+#
+# Copyright (c), James Cammarata, 2016
+# Copyright (c), Toshio Kuratomi, 2016
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+def _ansiballz_main():
+ import os
+ import os.path
+
+ # Access to the working directory is required by Python when using pipelining, as well as for the coverage module.
+ # Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
+ try:
+ os.getcwd()
+ except OSError:
+ try:
+ os.chdir(os.path.expanduser('~'))
+ except OSError:
+ os.chdir('/')
+
+%(rlimit)s
+
+ import sys
+ import __main__
+
+ # For some distros and python versions we pick up this script in the temporary
+ # directory. This leads to problems when the ansible module masks a python
+ # library that another import needs. We have not figured out what about the
+ # specific distros and python versions causes this to behave differently.
+ #
+ # Tested distros:
+ # Fedora23 with python3.4 Works
+ # Ubuntu15.10 with python2.7 Works
+ # Ubuntu15.10 with python3.4 Fails without this
+ # Ubuntu16.04.1 with python3.5 Fails without this
+ # To test on another platform:
+ # * use the copy module (since this shadows the stdlib copy module)
+ # * Turn off pipelining
+ # * Make sure that the destination file does not exist
+ # * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
+ # This will traceback in shutil. Looking at the complete traceback will show
+ # that shutil is importing copy which finds the ansible module instead of the
+ # stdlib module
+ scriptdir = None
+ try:
+ scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
+ except (AttributeError, OSError):
+ # Some platforms don't set __file__ when reading from stdin
+ # OSX raises OSError if using abspath() in a directory we don't have
+ # permission to read (realpath calls abspath)
+ pass
+
+ # Strip cwd from sys.path to avoid potential permissions issues
+ excludes = set(('', '.', scriptdir))
+ sys.path = [p for p in sys.path if p not in excludes]
+
+ import base64
+ import runpy
+ import shutil
+ import tempfile
+ import zipfile
+
+ if sys.version_info < (3,):
+ PY3 = False
+ else:
+ PY3 = True
+
+ ZIPDATA = """%(zipdata)s"""
+
+ # Note: temp_path isn't needed once we switch to zipimport
+ def invoke_module(modlib_path, temp_path, json_params):
+ # When installed via setuptools (including python setup.py install),
+ # ansible may be installed with an easy-install.pth file. That file
+ # may load the system-wide install of ansible rather than the one in
+ # the module. sitecustomize is the only way to override that setting.
+ z = zipfile.ZipFile(modlib_path, mode='a')
+
+ # py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
+ sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
+ sitecustomize = sitecustomize.encode('utf-8')
+ # Use a ZipInfo to work around zipfile limitation on hosts with
+ # clocks set to a pre-1980 year (for instance, Raspberry Pi)
+ zinfo = zipfile.ZipInfo()
+ zinfo.filename = 'sitecustomize.py'
+ zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
+ z.writestr(zinfo, sitecustomize)
+ z.close()
+
+ # Put the zipped up module_utils we got from the controller first in the python path so that we
+ # can monkeypatch the right basic
+ sys.path.insert(0, modlib_path)
+
+ # Monkeypatch the parameters into basic
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = json_params
+%(coverage)s
+ # Run the module! By importing it as '__main__', it thinks it is executing as a script
+ runpy.run_module(mod_name='%(module_fqn)s', init_globals=dict(_module_fqn='%(module_fqn)s', _modlib_path=modlib_path),
+ run_name='__main__', alter_sys=True)
+
+ # Ansible modules must exit themselves
+ print('{"msg": "New-style module did not handle its own exit", "failed": true}')
+ sys.exit(1)
+
+ def debug(command, zipped_mod, json_params):
+ # The code here normally doesn't run. It's only used for debugging on the
+ # remote machine.
+ #
+ # The subcommands in this function make it easier to debug ansiballz
+ # modules. Here are the basic steps:
+ #
+ # Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
+ # to save the module file remotely::
+ # $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
+ #
+ # Part of the verbose output will tell you where on the remote machine the
+ # module was written to::
+ # [...]
+ # <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
+ # PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
+ # ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
+ # LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
+ # [...]
+ #
+ # Log in to the remote machine and run the module file from the previous
+ # step with the explode subcommand to extract the module payload into
+ # source files::
+ # $ ssh host1
+ # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
+ # Module expanded into:
+ # /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
+ #
+ # You can now edit the source files to instrument the code or experiment with
+ # different parameter values. When you're ready to run the code you've modified
+ # (instead of the code from the actual zipped module), use the execute subcommand like this::
+ # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
+
+ # Okay to use __file__ here because we're running from a kept file
+ basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
+ args_path = os.path.join(basedir, 'args')
+
+ if command == 'explode':
+ # transform the ZIPDATA into an exploded directory of code and then
+ # print the path to the code. This is an easy way for people to look
+ # at the code on the remote machine for debugging it in that
+ # environment
+ z = zipfile.ZipFile(zipped_mod)
+ for filename in z.namelist():
+ if filename.startswith('/'):
+ raise Exception('Something wrong with this module zip file: should not contain absolute paths')
+
+ dest_filename = os.path.join(basedir, filename)
+ if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
+ os.makedirs(dest_filename)
+ else:
+ directory = os.path.dirname(dest_filename)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ f = open(dest_filename, 'wb')
+ f.write(z.read(filename))
+ f.close()
+
+ # write the args file
+ f = open(args_path, 'wb')
+ f.write(json_params)
+ f.close()
+
+ print('Module expanded into:')
+ print('%%s' %% basedir)
+ exitcode = 0
+
+ elif command == 'execute':
+ # Execute the exploded code instead of executing the module from the
+ # embedded ZIPDATA. This allows people to easily run their modified
+ # code on the remote machine to see how changes will affect it.
+
+ # Set pythonpath to the debug dir
+ sys.path.insert(0, basedir)
+
+ # read in the args file which the user may have modified
+ with open(args_path, 'rb') as f:
+ json_params = f.read()
+
+ # Monkeypatch the parameters into basic
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = json_params
+
+ # Run the module! By importing it as '__main__', it thinks it is executing as a script
+ runpy.run_module(mod_name='%(module_fqn)s', init_globals=None, run_name='__main__', alter_sys=True)
+
+ # Ansible modules must exit themselves
+ print('{"msg": "New-style module did not handle its own exit", "failed": true}')
+ sys.exit(1)
+
+ else:
+ print('WARNING: Unknown debug command. Doing nothing.')
+ exitcode = 0
+
+ return exitcode
+
+ #
+ # See comments in the debug() method for information on debugging
+ #
+
+ ANSIBALLZ_PARAMS = %(params)s
+ if PY3:
+ ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
+ try:
+ # There's a race condition with the controller removing the
+ # remote_tmpdir and this module executing under async. So we cannot
+ # store this in remote_tmpdir (use system tempdir instead)
+ # Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
+ # (this helps ansible-test produce coverage stats)
+ temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
+
+ zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
+
+ with open(zipped_mod, 'wb') as modlib:
+ modlib.write(base64.b64decode(ZIPDATA))
+
+ if len(sys.argv) == 2:
+ exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
+ else:
+ # Note: temp_path isn't needed once we switch to zipimport
+ invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
+ finally:
+ try:
+ shutil.rmtree(temp_path)
+ except (NameError, OSError):
+ # tempdir creation probably failed
+ pass
+ sys.exit(exitcode)
+
+if __name__ == '__main__':
+ _ansiballz_main()
+'''
+
+ANSIBALLZ_COVERAGE_TEMPLATE = '''
+ os.environ['COVERAGE_FILE'] = '%(coverage_output)s=python-%%s=coverage' %% '.'.join(str(v) for v in sys.version_info[:2])
+
+ import atexit
+
+ try:
+ import coverage
+ except ImportError:
+ print('{"msg": "Could not import `coverage` module.", "failed": true}')
+ sys.exit(1)
+
+ cov = coverage.Coverage(config_file='%(coverage_config)s')
+
+ def atexit_coverage():
+ cov.stop()
+ cov.save()
+
+ atexit.register(atexit_coverage)
+
+ cov.start()
+'''
+
+ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = '''
+ try:
+ if PY3:
+ import importlib.util
+ if importlib.util.find_spec('coverage') is None:
+ raise ImportError
+ else:
+ import imp
+ imp.find_module('coverage')
+ except ImportError:
+ print('{"msg": "Could not find `coverage` module.", "failed": true}')
+ sys.exit(1)
+'''
+
+ANSIBALLZ_RLIMIT_TEMPLATE = '''
+ import resource
+
+ existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+ # adjust soft limit subject to existing hard limit
+ requested_soft = min(existing_hard, %(rlimit_nofile)d)
+
+ if requested_soft != existing_soft:
+ try:
+ resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard))
+ except ValueError:
+ # some platforms (eg macOS) lie about their hard limit
+ pass
+'''
+
+
+def _strip_comments(source):
+ # Strip comments and blank lines from the wrapper
+ buf = []
+ for line in source.splitlines():
+ l = line.strip()
+ if not l or l.startswith(u'#'):
+ continue
+ buf.append(line)
+ return u'\n'.join(buf)
+
+
+if C.DEFAULT_KEEP_REMOTE_FILES:
+ # Keep comments when KEEP_REMOTE_FILES is set. That way users will see
+ # the comments with some nice usage instructions
+ ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
+else:
+ # ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
+ ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
+
+# dirname(dirname(dirname(site-packages/ansible/executor/module_common.py))) == site-packages
+# Do this instead of getting site-packages from distutils.sysconfig so we work when we
+# haven't been installed
+site_packages = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+CORE_LIBRARY_PATH_RE = re.compile(r'%s/(?P<path>ansible/modules/.*)\.(py|ps1)$' % re.escape(site_packages))
+COLLECTION_PATH_RE = re.compile(r'/(?P<path>ansible_collections/[^/]+/[^/]+/plugins/modules/.*)\.(py|ps1)$')
+
+# Detect new-style Python modules by looking for required imports:
+# import ansible_collections.[my_ns.my_col.plugins.module_utils.my_module_util]
+# from ansible_collections.[my_ns.my_col.plugins.module_utils import my_module_util]
+# import ansible.module_utils[.basic]
+# from ansible.module_utils[ import basic]
+# from ansible.module_utils[.basic import AnsibleModule]
+# from ..module_utils[ import basic]
+# from ..module_utils[.basic import AnsibleModule]
+NEW_STYLE_PYTHON_MODULE_RE = re.compile(
+ # Relative imports
+ br'(?:from +\.{2,} *module_utils.* +import |'
+ # Collection absolute imports:
+ br'from +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.* +import |'
+ br'import +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.*|'
+ # Core absolute imports
+ br'from +ansible\.module_utils.* +import |'
+ br'import +ansible\.module_utils\.)'
+)
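+
+# For instance (illustrative), each of these import lines would mark a module
+# as new-style Python:
+# from ansible.module_utils.basic import AnsibleModule
+# import ansible.module_utils.basic
+# from ..module_utils import basic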
+
+
+class ModuleDepFinder(ast.NodeVisitor):
+ def __init__(self, module_fqn, tree, is_pkg_init=False, *args, **kwargs):
+ """
+ Walk the ast tree for the python module.
+ :arg module_fqn: The fully qualified name to reach this module in dotted notation.
+ example: ansible.module_utils.basic
+ :arg is_pkg_init: Inform the finder it's looking at a package init (eg __init__.py) to allow
+ relative import expansion to use the proper package level without having imported it locally first.
+
+ Save submodule[.submoduleN][.identifier] into self.submodules
+ when they are from ansible.module_utils or ansible_collections packages
+
+ self.submodules will end up with tuples like:
+ - ('ansible', 'module_utils', 'basic',)
+ - ('ansible', 'module_utils', 'urls', 'fetch_url')
+ - ('ansible', 'module_utils', 'database', 'postgres')
+ - ('ansible', 'module_utils', 'database', 'postgres', 'quote')
+ - ('ansible', 'module_utils', 'database', 'postgres', 'quote')
+ - ('ansible_collections', 'my_ns', 'my_col', 'plugins', 'module_utils', 'foo')
+
+ It's up to the calling code to determine whether the final element of the
+ tuple is a module name or something else (a function, class, or variable name).
+ .. seealso:: :python3:class:`ast.NodeVisitor`
+ """
+ super(ModuleDepFinder, self).__init__(*args, **kwargs)
+ self._tree = tree # squirrel this away so we can compare node parents to it
+ self.submodules = set()
+ self.optional_imports = set()
+ self.module_fqn = module_fqn
+ self.is_pkg_init = is_pkg_init
+
+ self._visit_map = {
+ Import: self.visit_Import,
+ ImportFrom: self.visit_ImportFrom,
+ }
+
+ self.visit(tree)
+
+ def generic_visit(self, node):
+ """Overridden ``generic_visit`` that makes some assumptions about our
+ use case, and improves performance by calling visitors directly instead
+ of calling ``visit`` to offload calling visitors.
+ """
+ generic_visit = self.generic_visit
+ visit_map = self._visit_map
+ for field, value in ast.iter_fields(node):
+ if isinstance(value, list):
+ for item in value:
+ if isinstance(item, (Import, ImportFrom)):
+ item.parent = node
+ visit_map[item.__class__](item)
+ elif isinstance(item, AST):
+ generic_visit(item)
+
+ visit = generic_visit
+
+ def visit_Import(self, node):
+ """
+ Handle import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
+
+ We save these as interesting submodules when the imported library is in ansible.module_utils
+ or ansible_collections
+ """
+ for alias in node.names:
+ if (alias.name.startswith('ansible.module_utils.') or
+ alias.name.startswith('ansible_collections.')):
+ py_mod = tuple(alias.name.split('.'))
+ self.submodules.add(py_mod)
+ # if the import's parent is the root document, it's a required import, otherwise it's optional
+ if node.parent != self._tree:
+ self.optional_imports.add(py_mod)
+ self.generic_visit(node)
+
+ def visit_ImportFrom(self, node):
+ """
+ Handle from ansible.module_utils.MODLIB import [.MODLIBn] [as asname]
+
+ Also has to handle relative imports
+
+ We save these as interesting submodules when the imported library is in ansible.module_utils
+ or ansible_collections
+ """
+
+ # FIXME: These should all get skipped:
+ # from ansible.executor import module_common
+ # from ...executor import module_common
+ # from ... import executor (Currently it gives a non-helpful error)
+ if node.level > 0:
+ # if we're in a package init, we have to add one to the node level (and make it None if 0 to preserve the right slicing behavior)
+ level_slice_offset = -node.level + 1 or None if self.is_pkg_init else -node.level
+ if self.module_fqn:
+ parts = tuple(self.module_fqn.split('.'))
+ if node.module:
+ # relative import: from .module import x
+ node_module = '.'.join(parts[:level_slice_offset] + (node.module,))
+ else:
+ # relative import: from . import x
+ node_module = '.'.join(parts[:level_slice_offset])
+ else:
+ # fall back to an absolute import
+ node_module = node.module
+ else:
+ # absolute import: from module import x
+ node_module = node.module
+
+ # Special case: six needs dedicated handling because of its
+ # runtime import logic
+ py_mod = None
+ if node.names[0].name == '_six':
+ self.submodules.add(('_six',))
+ elif node_module.startswith('ansible.module_utils'):
+ # from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
+ # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
+ # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
+ # from ansible.module_utils import MODULE1 [,MODULEn] [as asname]
+ py_mod = tuple(node_module.split('.'))
+
+ elif node_module.startswith('ansible_collections.'):
+ if node_module.endswith('plugins.module_utils') or '.plugins.module_utils.' in node_module:
+ # from ansible_collections.ns.coll.plugins.module_utils import MODULE [as aname] [,MODULE2] [as aname]
+ # from ansible_collections.ns.coll.plugins.module_utils.MODULE import IDENTIFIER [as aname]
+ # FIXME: Unhandled cornercase (needs to be ignored):
+ # from ansible_collections.ns.coll.plugins.[!module_utils].[FOO].plugins.module_utils import IDENTIFIER
+ py_mod = tuple(node_module.split('.'))
+ else:
+ # Not from module_utils so ignore. for instance:
+ # from ansible_collections.ns.coll.plugins.lookup import IDENTIFIER
+ pass
+
+ if py_mod:
+ for alias in node.names:
+ self.submodules.add(py_mod + (alias.name,))
+ # if the import's parent is the root document, it's a required import, otherwise it's optional
+ if node.parent != self._tree:
+ self.optional_imports.add(py_mod + (alias.name,))
+
+ self.generic_visit(node)
+
+
+def _slurp(path):
+ if not os.path.exists(path):
+ raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
+ with open(path, 'rb') as fd:
+ data = fd.read()
+ return data
+
+
+def _get_shebang(interpreter, task_vars, templar, args=tuple(), remote_is_local=False):
+ """
+ Handles the different ways ansible allows overriding the shebang target for a module.
+ """
+ # FUTURE: add logical equivalence for python3 in the case of py3-only modules
+
+ interpreter_name = os.path.basename(interpreter).strip()
+
+ # name for interpreter var
+ interpreter_config = u'ansible_%s_interpreter' % interpreter_name
+ # key for config
+ interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper()
+
+ interpreter_out = None
+
+ # we only do interpreter discovery for python; the rest rely on matching vars
+ if interpreter_name == 'python':
+ # skip detection for network os execution, use playbook supplied one if possible
+ if remote_is_local:
+ interpreter_out = task_vars['ansible_playbook_python']
+
+ # a config def exists for this interpreter type; consult config for the value
+ elif C.config.get_configuration_definition(interpreter_config_key):
+
+ interpreter_from_config = C.config.get_config_value(interpreter_config_key, variables=task_vars)
+ interpreter_out = templar.template(interpreter_from_config.strip())
+
+ # handle interpreter discovery if requested or empty interpreter was provided
+ if not interpreter_out or interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
+
+ discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name
+ facts_from_task_vars = task_vars.get('ansible_facts', {})
+
+ if discovered_interpreter_config not in facts_from_task_vars:
+ # interpreter discovery is desired, but has not been run for this host
+ raise InterpreterDiscoveryRequiredError("interpreter discovery needed", interpreter_name=interpreter_name, discovery_mode=interpreter_out)
+ else:
+ interpreter_out = facts_from_task_vars[discovered_interpreter_config]
+ else:
+ raise InterpreterDiscoveryRequiredError("interpreter discovery required", interpreter_name=interpreter_name, discovery_mode='auto_legacy')
+
+ elif interpreter_config in task_vars:
+ # for non python we consult vars for a possible direct override
+ interpreter_out = templar.template(task_vars.get(interpreter_config).strip())
+
+ if not interpreter_out:
+ # nothing matched (None), or someone configured an empty string or empty interpreter
+ interpreter_out = interpreter
+
+ # set shebang
+ shebang = u'#!{0}'.format(interpreter_out)
+ if args:
+ shebang = shebang + u' ' + u' '.join(args)
+
+ return shebang, interpreter_out
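+
+# Illustrative result (assumed task vars): for interpreter '/usr/bin/python'
+# with ansible_python_interpreter set to '/usr/bin/python3', this returns
+# ('#!/usr/bin/python3', '/usr/bin/python3').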
+
+
+class ModuleUtilLocatorBase:
+ def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False, is_optional=False):
+ self._is_ambiguous = is_ambiguous
+ # a child package redirection could cause intermediate package levels to be missing, eg
+ # from ansible.module_utils.x.y.z import foo; if x.y.z.foo is redirected, we may not have packages on disk for
+ # the intermediate packages x.y.z, so we'll need to supply empty packages for those
+ self._child_is_redirected = child_is_redirected
+ self._is_optional = is_optional
+ self.found = False
+ self.redirected = False
+ self.fq_name_parts = fq_name_parts
+ self.source_code = ''
+ self.output_path = ''
+ self.is_package = False
+ self._collection_name = None
+ # for ambiguous imports, we should only test for things more than one level below module_utils
+ # this lets us detect erroneous imports and redirections earlier
+ if is_ambiguous and len(self._get_module_utils_remainder_parts(fq_name_parts)) > 1:
+ self.candidate_names = [fq_name_parts, fq_name_parts[:-1]]
+ else:
+ self.candidate_names = [fq_name_parts]
+
+ @property
+ def candidate_names_joined(self):
+ return ['.'.join(n) for n in self.candidate_names]
+
+ def _handle_redirect(self, name_parts):
+ module_utils_relative_parts = self._get_module_utils_remainder_parts(name_parts)
+
+ # only allow redirects from below module_utils- if above that, bail out (eg, parent package names)
+ if not module_utils_relative_parts:
+ return False
+
+ try:
+ collection_metadata = _get_collection_metadata(self._collection_name)
+ except ValueError as ve: # collection not found or some other error related to collection load
+ if self._is_optional:
+ return False
+ raise AnsibleError('error processing module_util {0} loading redirected collection {1}: {2}'
+ .format('.'.join(name_parts), self._collection_name, to_native(ve)))
+
+ routing_entry = _nested_dict_get(collection_metadata, ['plugin_routing', 'module_utils', '.'.join(module_utils_relative_parts)])
+ if not routing_entry:
+ return False
+ # FIXME: add deprecation warning support
+
+ dep_or_ts = routing_entry.get('tombstone')
+ removed = dep_or_ts is not None
+ if not removed:
+ dep_or_ts = routing_entry.get('deprecation')
+
+ if dep_or_ts:
+ removal_date = dep_or_ts.get('removal_date')
+ removal_version = dep_or_ts.get('removal_version')
+ warning_text = dep_or_ts.get('warning_text')
+
+ msg = 'module_util {0} has been removed'.format('.'.join(name_parts))
+ if warning_text:
+ msg += ' ({0})'.format(warning_text)
+ else:
+ msg += '.'
+
+ display.deprecated(msg, removal_version, removed, removal_date, self._collection_name)
+ if 'redirect' in routing_entry:
+ self.redirected = True
+ source_pkg = '.'.join(name_parts)
+ self.is_package = True # treat all redirects as packages
+ redirect_target_pkg = routing_entry['redirect']
+
+ # expand FQCN redirects
+ if not redirect_target_pkg.startswith('ansible_collections'):
+ split_fqcn = redirect_target_pkg.split('.')
+ if len(split_fqcn) < 3:
+ raise Exception('invalid redirect for {0}: {1}'.format(source_pkg, redirect_target_pkg))
+ # assume it's an FQCN, expand it
+ redirect_target_pkg = 'ansible_collections.{0}.{1}.plugins.module_utils.{2}'.format(
+ split_fqcn[0], # ns
+ split_fqcn[1], # coll
+ '.'.join(split_fqcn[2:]) # sub-module_utils remainder
+ )
+ display.vvv('redirecting module_util {0} to {1}'.format(source_pkg, redirect_target_pkg))
+ self.source_code = self._generate_redirect_shim_source(source_pkg, redirect_target_pkg)
+ return True
+ return False
+
+ def _get_module_utils_remainder_parts(self, name_parts):
+ # subclasses should override to return the name parts after module_utils
+ return []
+
+ def _get_module_utils_remainder(self, name_parts):
+ # return the remainder parts as a package string
+ return '.'.join(self._get_module_utils_remainder_parts(name_parts))
+
+ def _find_module(self, name_parts):
+ return False
+
+ def _locate(self, redirect_first=True):
+ for candidate_name_parts in self.candidate_names:
+ if redirect_first and self._handle_redirect(candidate_name_parts):
+ break
+
+ if self._find_module(candidate_name_parts):
+ break
+
+ if not redirect_first and self._handle_redirect(candidate_name_parts):
+ break
+
+ else: # didn't find what we were looking for- last chance for packages whose parents were redirected
+ if self._child_is_redirected: # make fake packages
+ self.is_package = True
+ self.source_code = ''
+ else: # nope, just bail
+ return
+
+ if self.is_package:
+ path_parts = candidate_name_parts + ('__init__',)
+ else:
+ path_parts = candidate_name_parts
+ self.found = True
+ self.output_path = os.path.join(*path_parts) + '.py'
+ self.fq_name_parts = candidate_name_parts
+
+ def _generate_redirect_shim_source(self, fq_source_module, fq_target_module):
+ return """
+import sys
+import {1} as mod
+
+sys.modules['{0}'] = mod
+""".format(fq_source_module, fq_target_module)
+
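+ # For a redirect of ansible.module_utils.foo to collection ns.coll
+ # (illustrative), the generated shim reads:
+ #
+ # import sys
+ # import ansible_collections.ns.coll.plugins.module_utils.foo as mod
+ # sys.modules['ansible.module_utils.foo'] = mod
+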
+ # FIXME: add __repr__ impl
+
+
+class LegacyModuleUtilLocator(ModuleUtilLocatorBase):
+ def __init__(self, fq_name_parts, is_ambiguous=False, mu_paths=None, child_is_redirected=False):
+ super(LegacyModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected)
+
+ if fq_name_parts[0:2] != ('ansible', 'module_utils'):
+ raise Exception('this class can only locate from ansible.module_utils, got {0}'.format(fq_name_parts))
+
+ if fq_name_parts[2] == 'six':
+ # FIXME: handle the ansible.module_utils.six._six case with a redirect or an internal _six attr on six itself?
+ # six creates its submodules at runtime; convert all these to just 'ansible.module_utils.six'
+ fq_name_parts = ('ansible', 'module_utils', 'six')
+ self.candidate_names = [fq_name_parts]
+
+ self._mu_paths = mu_paths
+ self._collection_name = 'ansible.builtin' # legacy module utils always look in ansible.builtin for redirects
+ self._locate(redirect_first=False) # let local stuff override redirects for legacy
+
+ def _get_module_utils_remainder_parts(self, name_parts):
+ return name_parts[2:] # eg, foo.bar for ansible.module_utils.foo.bar
+
+ def _find_module(self, name_parts):
+ rel_name_parts = self._get_module_utils_remainder_parts(name_parts)
+
+ # no redirection; try to find the module
+ if len(rel_name_parts) == 1: # direct child of module_utils, just search the top-level dirs we were given
+ paths = self._mu_paths
+ else: # a nested submodule of module_utils, extend the paths given with the intermediate package names
+ paths = [os.path.join(p, *rel_name_parts[:-1]) for p in
+ self._mu_paths] # extend the MU paths with the relative bit
+
+ # find_spec needs the full module name
+ self._info = info = importlib.machinery.PathFinder.find_spec('.'.join(name_parts), paths)
+ if info is not None and os.path.splitext(info.origin)[1] in importlib.machinery.SOURCE_SUFFIXES:
+ self.is_package = info.origin.endswith('/__init__.py')
+ path = info.origin
+ else:
+ return False
+ self.source_code = _slurp(path)
+
+ return True
+
+
+class CollectionModuleUtilLocator(ModuleUtilLocatorBase):
+ def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False, is_optional=False):
+ super(CollectionModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected, is_optional)
+
+ if fq_name_parts[0] != 'ansible_collections':
+ raise Exception('CollectionModuleUtilLocator can only locate from ansible_collections, got {0}'.format(fq_name_parts))
+ elif len(fq_name_parts) >= 6 and fq_name_parts[3:5] != ('plugins', 'module_utils'):
+ raise Exception('CollectionModuleUtilLocator can only locate below ansible_collections.(ns).(coll).plugins.module_utils, got {0}'
+ .format(fq_name_parts))
+
+ self._collection_name = '.'.join(fq_name_parts[1:3])
+
+ self._locate()
+
+ def _find_module(self, name_parts):
+ # synthesize empty inits for packages down through module_utils- we don't want to allow those to be shipped over, but the
+ # package hierarchy needs to exist
+ if len(name_parts) < 6:
+ self.source_code = ''
+ self.is_package = True
+ return True
+
+ # NB: we can't use pkgutil.get_data safely here, since we don't want to import/execute package/module code on
+ # the controller while analyzing/assembling the module, so we'll have to manually import the collection's
+ # Python package to locate it (import root collection, reassemble resource path beneath, fetch source)
+
+ collection_pkg_name = '.'.join(name_parts[0:3])
+ resource_base_path = os.path.join(*name_parts[3:])
+
+ src = None
+ # look for package_dir first, then module
+ try:
+ src = pkgutil.get_data(collection_pkg_name, to_native(os.path.join(resource_base_path, '__init__.py')))
+ except ImportError:
+ pass
+
+ # TODO: we might want to synthesize fake inits for py3-style packages, for now they're required beneath module_utils
+
+ if src is not None: # empty string is OK
+ self.is_package = True
+ else:
+ try:
+ src = pkgutil.get_data(collection_pkg_name, to_native(resource_base_path + '.py'))
+ except ImportError:
+ pass
+
+ if src is None: # empty string is OK
+ return False
+
+ self.source_code = src
+ return True
+
+ def _get_module_utils_remainder_parts(self, name_parts):
+ return name_parts[5:] # eg, foo.bar for ansible_collections.ns.coll.plugins.module_utils.foo.bar
+
+
+def recursive_finder(name, module_fqn, module_data, zf):
+ """
+ Using ModuleDepFinder, make sure we have all of the module_utils files that
+ the module and its module_utils files needs. (no longer actually recursive)
+ :arg name: Name of the python module we're examining
+ :arg module_fqn: Fully qualified name of the python module we're scanning
+ :arg module_data: string Python code of the module we're scanning
+ :arg zf: An open :python:class:`zipfile.ZipFile` object that holds the Ansible module payload
+ which we're assembling
+ """
+
+ # py_module_cache maps python module names to a tuple of the code in the module
+ # and the pathname to the module.
+ # Here we pre-load it with modules which we create without bothering to
+ # read from actual files (In some cases, these need to differ from what ansible
+ # ships because they're namespace packages in the module)
+ # FIXME: do we actually want ns pkg behavior for these? Seems like they should just be forced to emptyish pkg stubs
+ py_module_cache = {
+ ('ansible',): (
+ b'from pkgutil import extend_path\n'
+ b'__path__=extend_path(__path__,__name__)\n'
+ b'__version__="' + to_bytes(__version__) +
+ b'"\n__author__="' + to_bytes(__author__) + b'"\n',
+ 'ansible/__init__.py'),
+ ('ansible', 'module_utils'): (
+ b'from pkgutil import extend_path\n'
+ b'__path__=extend_path(__path__,__name__)\n',
+ 'ansible/module_utils/__init__.py')}
+
+ module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
+ module_utils_paths.append(_MODULE_UTILS_PATH)
+
+ # Parse the module code and find the imports of ansible.module_utils
+ try:
+ tree = compile(module_data, '<unknown>', 'exec', ast.PyCF_ONLY_AST)
+ except (SyntaxError, IndentationError) as e:
+ raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))
+
+ finder = ModuleDepFinder(module_fqn, tree)
+
+ # each entry records the module name parts and whether or not the import is ambiguous as a module name
+ # or an attribute of a module (eg from x.y import z <-- is z a module or an attribute of x.y?)
+ modules_to_process = [ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports) for m in finder.submodules]
+
+ # HACK: basic is currently always required since module global init is currently tied up with AnsiballZ arg input
+ modules_to_process.append(ModuleUtilsProcessEntry(('ansible', 'module_utils', 'basic'), False, False, is_optional=False))
+
+ # we'll be adding new modules inline as we discover them, so just keep going til we've processed them all
+ while modules_to_process:
+ modules_to_process.sort() # not strictly necessary, but nice to process things in predictable and repeatable order
+ py_module_name, is_ambiguous, child_is_redirected, is_optional = modules_to_process.pop(0)
+
+ if py_module_name in py_module_cache:
+ # this is normal; we'll often see the same module imported many times, but we only need to process it once
+ continue
+
+ if py_module_name[0:2] == ('ansible', 'module_utils'):
+ module_info = LegacyModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous,
+ mu_paths=module_utils_paths, child_is_redirected=child_is_redirected)
+ elif py_module_name[0] == 'ansible_collections':
+ module_info = CollectionModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous,
+ child_is_redirected=child_is_redirected, is_optional=is_optional)
+ else:
+ # FIXME: dot-joined result
+ display.warning('ModuleDepFinder improperly found a non-module_utils import %s'
+ % [py_module_name])
+ continue
+
+ # Could not find the module. Construct a helpful error message.
+ if not module_info.found:
+ if is_optional:
+ # this was a best-effort optional import that we couldn't find, oh well, move along...
+ continue
+ # FIXME: use dot-joined candidate names
+ msg = 'Could not find imported module support code for {0}. Looked for ({1})'.format(module_fqn, module_info.candidate_names_joined)
+ raise AnsibleError(msg)
+
+ # check the cache one more time with the module we actually found, since the name could be different than the input
+ # eg, imported name vs module
+ if module_info.fq_name_parts in py_module_cache:
+ continue
+
+ # compile the source, process all relevant imported modules
+ try:
+ tree = compile(module_info.source_code, '<unknown>', 'exec', ast.PyCF_ONLY_AST)
+ except (SyntaxError, IndentationError) as e:
+ raise AnsibleError("Unable to import %s due to %s" % (module_info.fq_name_parts, e.msg))
+
+ finder = ModuleDepFinder('.'.join(module_info.fq_name_parts), tree, module_info.is_package)
+ modules_to_process.extend(ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports)
+ for m in finder.submodules if m not in py_module_cache)
+
+ # we've processed this item, add it to the output list
+ py_module_cache[module_info.fq_name_parts] = (module_info.source_code, module_info.output_path)
+
+ # ensure we process all ancestor package inits
+ accumulated_pkg_name = []
+ for pkg in module_info.fq_name_parts[:-1]:
+ accumulated_pkg_name.append(pkg) # we're accumulating this across iterations
+ normalized_name = tuple(accumulated_pkg_name) # extra machinations to get a hashable type (list is not)
+ if normalized_name not in py_module_cache:
+ modules_to_process.append(ModuleUtilsProcessEntry(normalized_name, False, module_info.redirected, is_optional=is_optional))
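+        # For example (illustrative): finding ('ansible', 'module_utils', 'facts', 'system')
+        # also queues ('ansible',), ('ansible', 'module_utils') and
+        # ('ansible', 'module_utils', 'facts'), so their package __init__.py files ship too.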
+
+ for py_module_name in py_module_cache:
+ py_module_file_name = py_module_cache[py_module_name][1]
+
+ zf.writestr(py_module_file_name, py_module_cache[py_module_name][0])
+ mu_file = to_text(py_module_file_name, errors='surrogate_or_strict')
+ display.vvvvv("Including module_utils file %s" % mu_file)
+
+
+def _is_binary(b_module_data):
+ textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
+ start = b_module_data[:1024]
+ return bool(start.translate(None, textchars))
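+# For example (illustrative): _is_binary(b'#!/usr/bin/python\n') is False because
+# every byte is printable text, while _is_binary(b'\x7fELF\x02\x01\x01') is True
+# because the non-text bytes (0x7f, 0x02, 0x01) survive the translate() filter.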
+
+
+def _get_ansible_module_fqn(module_path):
+ """
+ Get the fully qualified name for an ansible module based on its pathname
+
+    remote_module_fqn is the fully qualified name, e.g. ansible.modules.system.ping
+    or ansible_collections.namespace.collection_name.plugins.modules.ping
+ .. warning:: This function is for ansible modules only. It won't work for other things
+ (non-module plugins, etc)
+ """
+ remote_module_fqn = None
+
+ # Is this a core module?
+ match = CORE_LIBRARY_PATH_RE.search(module_path)
+ if not match:
+ # Is this a module in a collection?
+ match = COLLECTION_PATH_RE.search(module_path)
+
+ # We can tell the FQN for core modules and collection modules
+ if match:
+ path = match.group('path')
+ if '.' in path:
+ # FQNs must be valid as python identifiers. This sanity check has failed.
+ # we could check other things as well
+ raise ValueError('Module name (or path) was not a valid python identifier')
+
+ remote_module_fqn = '.'.join(path.split('/'))
+ else:
+ # Currently we do not handle modules in roles so we can end up here for that reason
+ raise ValueError("Unable to determine module's fully qualified name")
+
+ return remote_module_fqn
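+# For example (illustrative): a core module at .../lib/ansible/modules/ping.py
+# maps to 'ansible.modules.ping', while a collection module at
+# .../ansible_collections/ns/col/plugins/modules/ping.py maps to
+# 'ansible_collections.ns.col.plugins.modules.ping'.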
+
+
+def _add_module_to_zip(zf, remote_module_fqn, b_module_data):
+ """Add a module from ansible or from an ansible collection into the module zip"""
+ module_path_parts = remote_module_fqn.split('.')
+
+ # Write the module
+ module_path = '/'.join(module_path_parts) + '.py'
+ zf.writestr(module_path, b_module_data)
+
+ # Write the __init__.py's necessary to get there
+ if module_path_parts[0] == 'ansible':
+        # The ansible namespace is set up as part of the module_utils setup...
+ start = 2
+ existing_paths = frozenset()
+ else:
+ # ... but ansible_collections and other toplevels are not
+ start = 1
+ existing_paths = frozenset(zf.namelist())
+
+ for idx in range(start, len(module_path_parts)):
+ package_path = '/'.join(module_path_parts[:idx]) + '/__init__.py'
+ # If a collections module uses module_utils from a collection then most packages will have already been added by recursive_finder.
+ if package_path in existing_paths:
+ continue
+ # Note: We don't want to include more than one ansible module in a payload at this time
+ # so no need to fill the __init__.py with namespace code
+ zf.writestr(package_path, b'')
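+# For example (illustrative): for remote_module_fqn
+# 'ansible_collections.ns.col.plugins.modules.ping' the zip gains
+# 'ansible_collections/ns/col/plugins/modules/ping.py' plus an empty
+# __init__.py for each ancestor package not already written by recursive_finder.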
+
+
+def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
+ become_method, become_user, become_password, become_flags, environment, remote_is_local=False):
+    """
+    Given the source of the module, determine its style (old, new, binary, ...),
+    embed the module arguments and any required module_utils/wrapper code, and
+    return the updated module data together with its style and shebang.
+    """
+ module_substyle = module_style = 'old'
+
+ # module_style is something important to calling code (ActionBase). It
+ # determines how arguments are formatted (json vs k=v) and whether
+ # a separate arguments file needs to be sent over the wire.
+ # module_substyle is extra information that's useful internally. It tells
+ # us what we have to look to substitute in the module files and whether
+ # we're using module replacer or ansiballz to format the module itself.
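+    # Rough marker-to-style map (illustrative summary of the checks below):
+    #   binary payload                          -> 'binary'
+    #   REPLACER                                -> 'new' / 'python'
+    #   new-style ansible.module_utils import   -> 'new' / 'python'
+    #   REPLACER_WINDOWS or #Requires variants  -> 'new' / 'powershell'
+    #   REPLACER_JSONARGS                       -> 'new' / 'jsonargs'
+    #   WANT_JSON marker                        -> 'non_native_want_json'
+    #   anything else                           -> 'old'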
+ if _is_binary(b_module_data):
+ module_substyle = module_style = 'binary'
+ elif REPLACER in b_module_data:
+        # Do REPLACER before the ansible.module_utils check because we need to make
+        # sure we substitute REPLACER with "from ansible.module_utils.basic import *"
+ module_style = 'new'
+ module_substyle = 'python'
+ b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
+ elif NEW_STYLE_PYTHON_MODULE_RE.search(b_module_data):
+ module_style = 'new'
+ module_substyle = 'python'
+ elif REPLACER_WINDOWS in b_module_data:
+ module_style = 'new'
+ module_substyle = 'powershell'
+ b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
+ elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
+ or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
+ or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \
+ or re.search(b'#AnsibleRequires -Powershell', b_module_data, re.IGNORECASE) \
+ or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE):
+ module_style = 'new'
+ module_substyle = 'powershell'
+ elif REPLACER_JSONARGS in b_module_data:
+ module_style = 'new'
+ module_substyle = 'jsonargs'
+ elif b'WANT_JSON' in b_module_data:
+ module_substyle = module_style = 'non_native_want_json'
+
+ shebang = None
+ # Neither old-style, non_native_want_json nor binary modules should be modified
+ # except for the shebang line (Done by modify_module)
+ if module_style in ('old', 'non_native_want_json', 'binary'):
+ return b_module_data, module_style, shebang
+
+ output = BytesIO()
+
+ try:
+ remote_module_fqn = _get_ansible_module_fqn(module_path)
+ except ValueError:
+        # Modules in roles currently are not found by the fqn heuristic so we
+        # fall back to this. This means that relative imports inside a module from
+        # a role may fail. Absolute imports should be used for future-proofing.
+        # People should start writing collections instead of modules in roles, so we
+        # may never fix this.
+ display.debug('ANSIBALLZ: Could not determine module FQN')
+ remote_module_fqn = 'ansible.modules.%s' % module_name
+
+ if module_substyle == 'python':
+ params = dict(ANSIBLE_MODULE_ARGS=module_args,)
+ try:
+ python_repred_params = repr(json.dumps(params, cls=AnsibleJSONEncoder, vault_to_text=True))
+ except TypeError as e:
+ raise AnsibleError("Unable to pass options to module, they must be JSON serializable: %s" % to_native(e))
+
+ try:
+ compression_method = getattr(zipfile, module_compression)
+ except AttributeError:
+ display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
+ compression_method = zipfile.ZIP_STORED
+
+ lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
+ cached_module_filename = os.path.join(lookup_path, "%s-%s" % (remote_module_fqn, module_compression))
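+        # For example (illustrative): with the default local tmp dir this is
+        # something like ~/.ansible/tmp/ansible-local-XXXX/ansiballz_cache/ansible.modules.ping-ZIP_DEFLATED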
+
+ zipdata = None
+ # Optimization -- don't lock if the module has already been cached
+ if os.path.exists(cached_module_filename):
+ display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
+ with open(cached_module_filename, 'rb') as module_data:
+ zipdata = module_data.read()
+ else:
+ if module_name in action_write_locks.action_write_locks:
+ display.debug('ANSIBALLZ: Using lock for %s' % module_name)
+ lock = action_write_locks.action_write_locks[module_name]
+ else:
+                # If the action plugin directly invokes the module (instead of
+                # going through a strategy) then we don't have a cross-process
+                # lock specifically for this module. Use the "unexpected
+                # module" lock instead.
+ display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
+ lock = action_write_locks.action_write_locks[None]
+
+ display.debug('ANSIBALLZ: Acquiring lock')
+ with lock:
+ display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
+ # Check that no other process has created this while we were
+ # waiting for the lock
+ if not os.path.exists(cached_module_filename):
+ display.debug('ANSIBALLZ: Creating module')
+ # Create the module zip data
+ zipoutput = BytesIO()
+ zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
+
+                    # walk the module imports, looking for module_utils to send; they'll be added to the zipfile
+ recursive_finder(module_name, remote_module_fqn, b_module_data, zf)
+
+ display.debug('ANSIBALLZ: Writing module into payload')
+ _add_module_to_zip(zf, remote_module_fqn, b_module_data)
+
+ zf.close()
+ zipdata = base64.b64encode(zipoutput.getvalue())
+
+ # Write the assembled module to a temp file (write to temp
+ # so that no one looking for the file reads a partially
+ # written file)
+ #
+ # FIXME: Once split controller/remote is merged, this can be simplified to
+ # os.makedirs(lookup_path, exist_ok=True)
+ if not os.path.exists(lookup_path):
+ try:
+ # Note -- if we have a global function to setup, that would
+ # be a better place to run this
+ os.makedirs(lookup_path)
+ except OSError:
+ # Multiple processes tried to create the directory. If it still does not
+ # exist, raise the original exception.
+ if not os.path.exists(lookup_path):
+ raise
+ display.debug('ANSIBALLZ: Writing module')
+ with open(cached_module_filename + '-part', 'wb') as f:
+ f.write(zipdata)
+
+ # Rename the file into its final position in the cache so
+ # future users of this module can read it off the
+ # filesystem instead of constructing from scratch.
+ display.debug('ANSIBALLZ: Renaming module')
+ os.rename(cached_module_filename + '-part', cached_module_filename)
+ display.debug('ANSIBALLZ: Done creating module')
+
+ if zipdata is None:
+ display.debug('ANSIBALLZ: Reading module after lock')
+ # Another process wrote the file while we were waiting for
+ # the write lock. Go ahead and read the data from disk
+ # instead of re-creating it.
+ try:
+ with open(cached_module_filename, 'rb') as f:
+ zipdata = f.read()
+ except IOError:
+ raise AnsibleError('A different worker process failed to create module file. '
+ 'Look at traceback for that process for debugging information.')
+ zipdata = to_text(zipdata, errors='surrogate_or_strict')
+
+ o_interpreter, o_args = _extract_interpreter(b_module_data)
+ if o_interpreter is None:
+ o_interpreter = u'/usr/bin/python'
+
+ shebang, interpreter = _get_shebang(o_interpreter, task_vars, templar, o_args, remote_is_local=remote_is_local)
+
+ # FUTURE: the module cache entry should be invalidated if we got this value from a host-dependent source
+ rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars)
+
+ if not isinstance(rlimit_nofile, int):
+ rlimit_nofile = int(templar.template(rlimit_nofile))
+
+ if rlimit_nofile:
+ rlimit = ANSIBALLZ_RLIMIT_TEMPLATE % dict(
+ rlimit_nofile=rlimit_nofile,
+ )
+ else:
+ rlimit = ''
+
+ coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
+
+ if coverage_config:
+ coverage_output = os.environ['_ANSIBLE_COVERAGE_OUTPUT']
+
+ if coverage_output:
+ # Enable code coverage analysis of the module.
+ # This feature is for internal testing and may change without notice.
+ coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
+ coverage_config=coverage_config,
+ coverage_output=coverage_output,
+ )
+ else:
+ # Verify coverage is available without importing it.
+ # This will detect when a module would fail with coverage enabled with minimal overhead.
+ coverage = ANSIBALLZ_COVERAGE_CHECK_TEMPLATE
+ else:
+ coverage = ''
+
+ now = datetime.datetime.utcnow()
+ output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
+ zipdata=zipdata,
+ ansible_module=module_name,
+ module_fqn=remote_module_fqn,
+ params=python_repred_params,
+ shebang=shebang,
+ coding=ENCODING_STRING,
+ year=now.year,
+ month=now.month,
+ day=now.day,
+ hour=now.hour,
+ minute=now.minute,
+ second=now.second,
+ coverage=coverage,
+ rlimit=rlimit,
+ )))
+ b_module_data = output.getvalue()
+
+ elif module_substyle == 'powershell':
+ # Powershell/winrm don't actually make use of shebang so we can
+ # safely set this here. If we let the fallback code handle this
+ # it can fail in the presence of the UTF8 BOM commonly added by
+ # Windows text editors
+ shebang = u'#!powershell'
+ # create the common exec wrapper payload and set that as the module_data
+ # bytes
+ b_module_data = ps_manifest._create_powershell_wrapper(
+ b_module_data, module_path, module_args, environment,
+ async_timeout, become, become_method, become_user, become_password,
+ become_flags, module_substyle, task_vars, remote_module_fqn
+ )
+
+ elif module_substyle == 'jsonargs':
+ module_args_json = to_bytes(json.dumps(module_args, cls=AnsibleJSONEncoder, vault_to_text=True))
+
+        # these strings could be included in a third-party module but
+        # officially they were included in the 'basic' snippet for new-style
+        # python modules (which has been replaced with something else in
+        # ansiballz). If we remove them from jsonargs-style module replacer
+        # then we can remove them everywhere.
+ python_repred_args = to_bytes(repr(module_args_json))
+ b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
+ b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
+ b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
+
+ # The main event -- substitute the JSON args string into the module
+ b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
+
+ facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
+ b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
+
+ return (b_module_data, module_style, shebang)
+
+
+def _extract_interpreter(b_module_data):
+    """
+    Extract the shebang expression from module data and return the interpreter
+    as a text string along with a list of its arguments, or (None, []) if no
+    shebang is detected.
+    """
+
+ interpreter = None
+ args = []
+ b_lines = b_module_data.split(b"\n", 1)
+ if b_lines[0].startswith(b"#!"):
+ b_shebang = b_lines[0].strip()
+
+ # shlex.split needs text on Python 3
+ cli_split = shlex.split(to_text(b_shebang[2:], errors='surrogate_or_strict'))
+
+ # convert args to text
+ cli_split = [to_text(a, errors='surrogate_or_strict') for a in cli_split]
+ interpreter = cli_split[0]
+ args = cli_split[1:]
+
+ return interpreter, args
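+# For example (illustrative):
+#   _extract_interpreter(b'#!/usr/bin/env python3 -u\nimport sys\n')
+#   returns (u'/usr/bin/env', [u'python3', u'-u'])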
+
+
+def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
+ become_method=None, become_user=None, become_password=None, become_flags=None, environment=None, remote_is_local=False):
+ """
+ Used to insert chunks of code into modules before transfer rather than
+ doing regular python imports. This allows for more efficient transfer in
+ a non-bootstrapping scenario by not moving extra files over the wire and
+ also takes care of embedding arguments in the transferred modules.
+
+ This version is done in such a way that local imports can still be
+ used in the module code, so IDEs don't have to be aware of what is going on.
+
+ Example:
+
+ from ansible.module_utils.basic import *
+
+ ... will result in the insertion of basic.py into the module
+ from the module_utils/ directory in the source tree.
+
+ For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
+ properties not available here.
+
+ """
+ task_vars = {} if task_vars is None else task_vars
+ environment = {} if environment is None else environment
+
+ with open(module_path, 'rb') as f:
+
+ # read in the module source
+ b_module_data = f.read()
+
+ (b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
+ async_timeout=async_timeout, become=become, become_method=become_method,
+ become_user=become_user, become_password=become_password, become_flags=become_flags,
+ environment=environment, remote_is_local=remote_is_local)
+
+ if module_style == 'binary':
+ return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
+ elif shebang is None:
+ interpreter, args = _extract_interpreter(b_module_data)
+        # no interpreter/shebang was detected; leave the module data untouched (it may be binary)
+ if interpreter is not None:
+
+ shebang, new_interpreter = _get_shebang(interpreter, task_vars, templar, args, remote_is_local=remote_is_local)
+
+ # update shebang
+ b_lines = b_module_data.split(b"\n", 1)
+
+ if interpreter != new_interpreter:
+ b_lines[0] = to_bytes(shebang, errors='surrogate_or_strict', nonstring='passthru')
+
+ if os.path.basename(interpreter).startswith(u'python'):
+ b_lines.insert(1, b_ENCODING_STRING)
+
+ b_module_data = b"\n".join(b_lines)
+
+ return (b_module_data, module_style, shebang)
+
+
+def get_action_args_with_defaults(action, args, defaults, templar, redirected_names=None, action_groups=None):
+ if redirected_names:
+ resolved_action_name = redirected_names[-1]
+ else:
+ resolved_action_name = action
+
+ if redirected_names is not None:
+ msg = (
+ "Finding module_defaults for the action %s. "
+ "The caller passed a list of redirected action names, which is deprecated. "
+ "The task's resolved action should be provided as the first argument instead."
+ )
+ display.deprecated(msg % resolved_action_name, version='2.16')
+
+ # Get the list of groups that contain this action
+    if action_groups is None:
+        msg = (
+            "Finding module_defaults for action %s. "
+            "The caller has not passed the action_groups, so any "
+            "groups that may include this action will be ignored."
+        )
+        display.warning(msg=msg % resolved_action_name)
+ group_names = []
+ else:
+ group_names = action_groups.get(resolved_action_name, [])
+
+ tmp_args = {}
+ module_defaults = {}
+
+ # Merge latest defaults into dict, since they are a list of dicts
+ if isinstance(defaults, list):
+ for default in defaults:
+ module_defaults.update(default)
+
+ # module_defaults keys are static, but the values may be templated
+ module_defaults = templar.template(module_defaults)
+ for default in module_defaults:
+ if default.startswith('group/'):
+ group_name = default.split('group/')[-1]
+ if group_name in group_names:
+ tmp_args.update((module_defaults.get('group/%s' % group_name) or {}).copy())
+
+ # handle specific action defaults
+ tmp_args.update(module_defaults.get(resolved_action_name, {}).copy())
+
+ # direct args override all
+ tmp_args.update(args)
+
+ return tmp_args
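+# For example (illustrative): given defaults of
+#   [{'group/aws': {'region': 'us-east-1'}}, {'ec2_instance': {'wait': True}}]
+# and 'ec2_instance' resolved into the 'aws' action group, the result is the
+# group defaults overlaid by the action-specific defaults, overlaid by the
+# task's own args, which always win.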
diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
new file mode 100644
index 0000000..2449782
--- /dev/null
+++ b/lib/ansible/executor/play_iterator.py
@@ -0,0 +1,652 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import fnmatch
+
+from enum import IntEnum, IntFlag
+
+from ansible import constants as C
+from ansible.errors import AnsibleAssertionError
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.playbook.block import Block
+from ansible.playbook.task import Task
+from ansible.utils.display import Display
+
+
+display = Display()
+
+
+__all__ = ['PlayIterator', 'IteratingStates', 'FailedStates']
+
+
+class IteratingStates(IntEnum):
+ SETUP = 0
+ TASKS = 1
+ RESCUE = 2
+ ALWAYS = 3
+ HANDLERS = 4
+ COMPLETE = 5
+
+
+class FailedStates(IntFlag):
+ NONE = 0
+ SETUP = 1
+ TASKS = 2
+ RESCUE = 4
+ ALWAYS = 8
+ HANDLERS = 16
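+    # Being an IntFlag, these values combine as a bit mask; e.g.
+    # (FailedStates.TASKS | FailedStates.RESCUE) records a failure in the tasks
+    # section followed by another in its rescue section.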
+
+
+class HostState:
+ def __init__(self, blocks):
+ self._blocks = blocks[:]
+ self.handlers = []
+
+ self.cur_block = 0
+ self.cur_regular_task = 0
+ self.cur_rescue_task = 0
+ self.cur_always_task = 0
+ self.cur_handlers_task = 0
+ self.run_state = IteratingStates.SETUP
+ self.fail_state = FailedStates.NONE
+ self.pre_flushing_run_state = None
+ self.update_handlers = True
+ self.pending_setup = False
+ self.tasks_child_state = None
+ self.rescue_child_state = None
+ self.always_child_state = None
+ self.did_rescue = False
+ self.did_start_at_task = False
+
+ def __repr__(self):
+ return "HostState(%r)" % self._blocks
+
+ def __str__(self):
+ return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, handlers=%d, run_state=%s, fail_state=%s, "
+ "pre_flushing_run_state=%s, update_handlers=%s, pending_setup=%s, "
+ "tasks child state? (%s), rescue child state? (%s), always child state? (%s), "
+ "did rescue? %s, did start at task? %s" % (
+ self.cur_block,
+ self.cur_regular_task,
+ self.cur_rescue_task,
+ self.cur_always_task,
+ self.cur_handlers_task,
+ self.run_state,
+ self.fail_state,
+ self.pre_flushing_run_state,
+ self.update_handlers,
+ self.pending_setup,
+ self.tasks_child_state,
+ self.rescue_child_state,
+ self.always_child_state,
+ self.did_rescue,
+ self.did_start_at_task,
+ ))
+
+ def __eq__(self, other):
+ if not isinstance(other, HostState):
+ return False
+
+ for attr in ('_blocks',
+ 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task', 'cur_handlers_task',
+ 'run_state', 'fail_state', 'pre_flushing_run_state', 'update_handlers', 'pending_setup',
+ 'tasks_child_state', 'rescue_child_state', 'always_child_state'):
+ if getattr(self, attr) != getattr(other, attr):
+ return False
+
+ return True
+
+ def get_current_block(self):
+ return self._blocks[self.cur_block]
+
+ def copy(self):
+ new_state = HostState(self._blocks)
+ new_state.handlers = self.handlers[:]
+ new_state.cur_block = self.cur_block
+ new_state.cur_regular_task = self.cur_regular_task
+ new_state.cur_rescue_task = self.cur_rescue_task
+ new_state.cur_always_task = self.cur_always_task
+ new_state.cur_handlers_task = self.cur_handlers_task
+ new_state.run_state = self.run_state
+ new_state.fail_state = self.fail_state
+ new_state.pre_flushing_run_state = self.pre_flushing_run_state
+ new_state.update_handlers = self.update_handlers
+ new_state.pending_setup = self.pending_setup
+ new_state.did_rescue = self.did_rescue
+ new_state.did_start_at_task = self.did_start_at_task
+ if self.tasks_child_state is not None:
+ new_state.tasks_child_state = self.tasks_child_state.copy()
+ if self.rescue_child_state is not None:
+ new_state.rescue_child_state = self.rescue_child_state.copy()
+ if self.always_child_state is not None:
+ new_state.always_child_state = self.always_child_state.copy()
+ return new_state
+
+
+class PlayIterator:
+
+ def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
+ self._play = play
+ self._blocks = []
+ self._variable_manager = variable_manager
+
+ setup_block = Block(play=self._play)
+ # Gathering facts with run_once would copy the facts from one host to
+ # the others.
+ setup_block.run_once = False
+ setup_task = Task(block=setup_block)
+ setup_task.action = 'gather_facts'
+ # TODO: hardcoded resolution here, but should use actual resolution code in the end,
+ # in case of 'legacy' mismatch
+ setup_task.resolved_action = 'ansible.builtin.gather_facts'
+ setup_task.name = 'Gathering Facts'
+ setup_task.args = {}
+
+ # Unless play is specifically tagged, gathering should 'always' run
+ if not self._play.tags:
+ setup_task.tags = ['always']
+
+ # Default options to gather
+ for option in ('gather_subset', 'gather_timeout', 'fact_path'):
+ value = getattr(self._play, option, None)
+ if value is not None:
+ setup_task.args[option] = value
+
+ setup_task.set_loader(self._play._loader)
+        # short-circuit fact gathering if the entire play is conditional
+ if self._play._included_conditional is not None:
+ setup_task.when = self._play._included_conditional[:]
+ setup_block.block = [setup_task]
+
+ setup_block = setup_block.filter_tagged_tasks(all_vars)
+ self._blocks.append(setup_block)
+
+        # keep a flattened (no blocks) list of all tasks from the play
+ # used for the lockstep mechanism in the linear strategy
+ self.all_tasks = setup_block.get_tasks()
+
+ for block in self._play.compile():
+ new_block = block.filter_tagged_tasks(all_vars)
+ if new_block.has_tasks():
+ self._blocks.append(new_block)
+ self.all_tasks.extend(new_block.get_tasks())
+
+ # keep list of all handlers, it is copied into each HostState
+ # at the beginning of IteratingStates.HANDLERS
+ # the copy happens at each flush in order to restore the original
+ # list and remove any included handlers that might not be notified
+ # at the particular flush
+ self.handlers = [h for b in self._play.handlers for h in b.block]
+
+ self._host_states = {}
+ start_at_matched = False
+ batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
+ self.batch_size = len(batch)
+ for host in batch:
+ self.set_state_for_host(host.name, HostState(blocks=self._blocks))
+ # if we're looking to start at a specific task, iterate through
+ # the tasks for this host until we find the specified task
+ if play_context.start_at_task is not None and not start_at_done:
+ while True:
+ (s, task) = self.get_next_task_for_host(host, peek=True)
+ if s.run_state == IteratingStates.COMPLETE:
+ break
+ if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \
+ task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
+ start_at_matched = True
+ break
+ self.set_state_for_host(host.name, s)
+
+ # finally, reset the host's state to IteratingStates.SETUP
+ if start_at_matched:
+ self._host_states[host.name].did_start_at_task = True
+ self._host_states[host.name].run_state = IteratingStates.SETUP
+
+ if start_at_matched:
+ # we have our match, so clear the start_at_task field on the
+ # play context to flag that we've started at a task (and future
+ # plays won't try to advance)
+ play_context.start_at_task = None
+
+ self.end_play = False
+ self.cur_task = 0
+
+ def get_host_state(self, host):
+ # Since we're using the PlayIterator to carry forward failed hosts,
+ # in the event that a previous host was not in the current inventory
+ # we create a stub state for it now
+ if host.name not in self._host_states:
+ self.set_state_for_host(host.name, HostState(blocks=[]))
+
+ return self._host_states[host.name].copy()
+
+ def cache_block_tasks(self, block):
+        display.deprecated(
+            'PlayIterator.cache_block_tasks is now a noop due to the changes '
+            'in the way tasks are cached and is deprecated.',
+            version='2.16'
+        )
+
+ def get_next_task_for_host(self, host, peek=False):
+
+ display.debug("getting the next task for host %s" % host.name)
+ s = self.get_host_state(host)
+
+ task = None
+ if s.run_state == IteratingStates.COMPLETE:
+ display.debug("host %s is done iterating, returning" % host.name)
+ return (s, None)
+
+ (s, task) = self._get_next_task_from_state(s, host=host)
+
+ if not peek:
+ self.set_state_for_host(host.name, s)
+
+ display.debug("done getting next task for host %s" % host.name)
+ display.debug(" ^ task is: %s" % task)
+ display.debug(" ^ state is: %s" % s)
+ return (s, task)
+
+ def _get_next_task_from_state(self, state, host):
+
+ task = None
+
+ # try and find the next task, given the current state.
+ while True:
+ # try to get the current block from the list of blocks, and
+ # if we run past the end of the list we know we're done with
+ # this block
+ try:
+ block = state._blocks[state.cur_block]
+ except IndexError:
+ state.run_state = IteratingStates.COMPLETE
+ return (state, None)
+
+ if state.run_state == IteratingStates.SETUP:
+ # First, we check to see if we were pending setup. If not, this is
+ # the first trip through IteratingStates.SETUP, so we set the pending_setup
+ # flag and try to determine if we do in fact want to gather facts for
+ # the specified host.
+ if not state.pending_setup:
+ state.pending_setup = True
+
+ # Gather facts if the default is 'smart' and we have not yet
+ # done it for this host; or if 'explicit' and the play sets
+ # gather_facts to True; or if 'implicit' and the play does
+ # NOT explicitly set gather_facts to False.
+
+ gathering = C.DEFAULT_GATHERING
+ implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)
+
+ if (gathering == 'implicit' and implied) or \
+ (gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
+ (gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
+ # The setup block is always self._blocks[0], as we inject it
+ # during the play compilation in __init__ above.
+ setup_block = self._blocks[0]
+ if setup_block.has_tasks() and len(setup_block.block) > 0:
+ task = setup_block.block[0]
+ else:
+ # This is the second trip through IteratingStates.SETUP, so we clear
+ # the flag and move onto the next block in the list while setting
+ # the run state to IteratingStates.TASKS
+ state.pending_setup = False
+
+ state.run_state = IteratingStates.TASKS
+ if not state.did_start_at_task:
+ state.cur_block += 1
+ state.cur_regular_task = 0
+ state.cur_rescue_task = 0
+ state.cur_always_task = 0
+ state.tasks_child_state = None
+ state.rescue_child_state = None
+ state.always_child_state = None
+
+ elif state.run_state == IteratingStates.TASKS:
+ # clear the pending setup flag, since we're past that and it didn't fail
+ if state.pending_setup:
+ state.pending_setup = False
+
+ # First, we check for a child task state that is not failed, and if we
+ # have one recurse into it for the next task. If we're done with the child
+ # state, we clear it and drop back to getting the next task from the list.
+ if state.tasks_child_state:
+ (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host)
+ if self._check_failed_state(state.tasks_child_state):
+ # failed child state, so clear it and move into the rescue portion
+ state.tasks_child_state = None
+ self._set_failed_state(state)
+ else:
+ # get the next task recursively
+ if task is None or state.tasks_child_state.run_state == IteratingStates.COMPLETE:
+ # we're done with the child state, so clear it and continue
+ # back to the top of the loop to get the next task
+ state.tasks_child_state = None
+ continue
+ else:
+ # First here, we check to see if we've failed anywhere down the chain
+ # of states we have, and if so we move onto the rescue portion. Otherwise,
+ # we check to see if we've moved past the end of the list of tasks. If so,
+ # we move into the always portion of the block, otherwise we get the next
+ # task from the list.
+ if self._check_failed_state(state):
+ state.run_state = IteratingStates.RESCUE
+ elif state.cur_regular_task >= len(block.block):
+ state.run_state = IteratingStates.ALWAYS
+ else:
+ task = block.block[state.cur_regular_task]
+ # if the current task is actually a child block, create a child
+ # state for us to recurse into on the next pass
+ if isinstance(task, Block):
+ state.tasks_child_state = HostState(blocks=[task])
+ state.tasks_child_state.run_state = IteratingStates.TASKS
+ # since we've created the child state, clear the task
+ # so we can pick up the child state on the next pass
+ task = None
+ state.cur_regular_task += 1
+
+ elif state.run_state == IteratingStates.RESCUE:
+ # The process here is identical to IteratingStates.TASKS, except instead
+ # we move into the always portion of the block.
+ if state.rescue_child_state:
+ (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host)
+ if self._check_failed_state(state.rescue_child_state):
+ state.rescue_child_state = None
+ self._set_failed_state(state)
+ else:
+ if task is None or state.rescue_child_state.run_state == IteratingStates.COMPLETE:
+ state.rescue_child_state = None
+ continue
+ else:
+ if state.fail_state & FailedStates.RESCUE == FailedStates.RESCUE:
+ state.run_state = IteratingStates.ALWAYS
+ elif state.cur_rescue_task >= len(block.rescue):
+ if len(block.rescue) > 0:
+ state.fail_state = FailedStates.NONE
+ state.run_state = IteratingStates.ALWAYS
+ state.did_rescue = True
+ else:
+ task = block.rescue[state.cur_rescue_task]
+ if isinstance(task, Block):
+ state.rescue_child_state = HostState(blocks=[task])
+ state.rescue_child_state.run_state = IteratingStates.TASKS
+ task = None
+ state.cur_rescue_task += 1
+
+ elif state.run_state == IteratingStates.ALWAYS:
+ # And again, the process here is identical to IteratingStates.TASKS, except
+ # instead we either move onto the next block in the list, or we set the
+ # run state to IteratingStates.COMPLETE in the event of any errors, or when we
+ # have hit the end of the list of blocks.
+ if state.always_child_state:
+ (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host)
+ if self._check_failed_state(state.always_child_state):
+ state.always_child_state = None
+ self._set_failed_state(state)
+ else:
+ if task is None or state.always_child_state.run_state == IteratingStates.COMPLETE:
+ state.always_child_state = None
+ continue
+ else:
+ if state.cur_always_task >= len(block.always):
+ if state.fail_state != FailedStates.NONE:
+ state.run_state = IteratingStates.COMPLETE
+ else:
+ state.cur_block += 1
+ state.cur_regular_task = 0
+ state.cur_rescue_task = 0
+ state.cur_always_task = 0
+ state.run_state = IteratingStates.TASKS
+ state.tasks_child_state = None
+ state.rescue_child_state = None
+ state.always_child_state = None
+ state.did_rescue = False
+ else:
+ task = block.always[state.cur_always_task]
+ if isinstance(task, Block):
+ state.always_child_state = HostState(blocks=[task])
+ state.always_child_state.run_state = IteratingStates.TASKS
+ task = None
+ state.cur_always_task += 1
+
+ elif state.run_state == IteratingStates.HANDLERS:
+ if state.update_handlers:
+ # reset handlers for HostState since handlers from include_tasks
+ # might be there from previous flush
+ state.handlers = self.handlers[:]
+ state.update_handlers = False
+ state.cur_handlers_task = 0
+
+ if state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS:
+ state.update_handlers = True
+ state.run_state = IteratingStates.COMPLETE
+ else:
+ while True:
+ try:
+ task = state.handlers[state.cur_handlers_task]
+ except IndexError:
+ task = None
+ state.run_state = state.pre_flushing_run_state
+ state.update_handlers = True
+ break
+ else:
+ state.cur_handlers_task += 1
+ if task.is_host_notified(host):
+ break
+
+ elif state.run_state == IteratingStates.COMPLETE:
+ return (state, None)
+
+ # if something above set the task, break out of the loop now
+ if task:
+ break
+
+ return (state, task)
+
+ def _set_failed_state(self, state):
+ if state.run_state == IteratingStates.SETUP:
+ state.fail_state |= FailedStates.SETUP
+ state.run_state = IteratingStates.COMPLETE
+ elif state.run_state == IteratingStates.TASKS:
+ if state.tasks_child_state is not None:
+ state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
+ else:
+ state.fail_state |= FailedStates.TASKS
+ if state._blocks[state.cur_block].rescue:
+ state.run_state = IteratingStates.RESCUE
+ elif state._blocks[state.cur_block].always:
+ state.run_state = IteratingStates.ALWAYS
+ else:
+ state.run_state = IteratingStates.COMPLETE
+ elif state.run_state == IteratingStates.RESCUE:
+ if state.rescue_child_state is not None:
+ state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
+ else:
+ state.fail_state |= FailedStates.RESCUE
+ if state._blocks[state.cur_block].always:
+ state.run_state = IteratingStates.ALWAYS
+ else:
+ state.run_state = IteratingStates.COMPLETE
+ elif state.run_state == IteratingStates.ALWAYS:
+ if state.always_child_state is not None:
+ state.always_child_state = self._set_failed_state(state.always_child_state)
+ else:
+ state.fail_state |= FailedStates.ALWAYS
+ state.run_state = IteratingStates.COMPLETE
+ elif state.run_state == IteratingStates.HANDLERS:
+ state.fail_state |= FailedStates.HANDLERS
+ state.update_handlers = True
+ if state._blocks[state.cur_block].rescue:
+ state.run_state = IteratingStates.RESCUE
+ elif state._blocks[state.cur_block].always:
+ state.run_state = IteratingStates.ALWAYS
+ else:
+ state.run_state = IteratingStates.COMPLETE
+ return state
+
+ def mark_host_failed(self, host):
+ s = self.get_host_state(host)
+ display.debug("marking host %s failed, current state: %s" % (host, s))
+ s = self._set_failed_state(s)
+ display.debug("^ failed state is now: %s" % s)
+ self.set_state_for_host(host.name, s)
+ self._play._removed_hosts.append(host.name)
+
+ def get_failed_hosts(self):
+ return dict((host, True) for (host, state) in self._host_states.items() if self._check_failed_state(state))
+
+ def _check_failed_state(self, state):
+ if state is None:
+ return False
+ elif state.run_state == IteratingStates.RESCUE and self._check_failed_state(state.rescue_child_state):
+ return True
+ elif state.run_state == IteratingStates.ALWAYS and self._check_failed_state(state.always_child_state):
+ return True
+ elif state.run_state == IteratingStates.HANDLERS and state.fail_state & FailedStates.HANDLERS == FailedStates.HANDLERS:
+ return True
+ elif state.fail_state != FailedStates.NONE:
+ if state.run_state == IteratingStates.RESCUE and state.fail_state & FailedStates.RESCUE == 0:
+ return False
+ elif state.run_state == IteratingStates.ALWAYS and state.fail_state & FailedStates.ALWAYS == 0:
+ return False
+ else:
+ return not (state.did_rescue and state.fail_state & FailedStates.ALWAYS == 0)
+ elif state.run_state == IteratingStates.TASKS and self._check_failed_state(state.tasks_child_state):
+ cur_block = state._blocks[state.cur_block]
+ if len(cur_block.rescue) > 0 and state.fail_state & FailedStates.RESCUE == 0:
+ return False
+ else:
+ return True
+ return False
+
+ def is_failed(self, host):
+ s = self.get_host_state(host)
+ return self._check_failed_state(s)
+
+ def clear_host_errors(self, host):
+ self._clear_state_errors(self.get_state_for_host(host.name))
+
+ def _clear_state_errors(self, state: HostState) -> None:
+ state.fail_state = FailedStates.NONE
+
+ if state.tasks_child_state is not None:
+ self._clear_state_errors(state.tasks_child_state)
+ elif state.rescue_child_state is not None:
+ self._clear_state_errors(state.rescue_child_state)
+ elif state.always_child_state is not None:
+ self._clear_state_errors(state.always_child_state)
+
+ def get_active_state(self, state):
+ '''
+ Finds the active state, recursively if necessary when there are child states.
+ '''
+ if state.run_state == IteratingStates.TASKS and state.tasks_child_state is not None:
+ return self.get_active_state(state.tasks_child_state)
+ elif state.run_state == IteratingStates.RESCUE and state.rescue_child_state is not None:
+ return self.get_active_state(state.rescue_child_state)
+ elif state.run_state == IteratingStates.ALWAYS and state.always_child_state is not None:
+ return self.get_active_state(state.always_child_state)
+ return state
+
+ def is_any_block_rescuing(self, state):
+ '''
+ Given the current HostState state, determines if the current block, or any child blocks,
+ are in rescue mode.
+ '''
+ if state.run_state == IteratingStates.TASKS and state.get_current_block().rescue:
+ return True
+ if state.tasks_child_state is not None:
+ return self.is_any_block_rescuing(state.tasks_child_state)
+ if state.rescue_child_state is not None:
+ return self.is_any_block_rescuing(state.rescue_child_state)
+ if state.always_child_state is not None:
+ return self.is_any_block_rescuing(state.always_child_state)
+ return False
+
+ def get_original_task(self, host, task):
+        display.deprecated(
+            'PlayIterator.get_original_task is now a noop due to the changes '
+            'in the way tasks are cached and is deprecated.',
+            version='2.16'
+        )
+ return (None, None)
+
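+    # For example (illustrative): while in IteratingStates.TASKS with
+    # cur_regular_task == 2, inserting [t1, t2] rewrites the current block as
+    # block[:2] + [t1, t2] + block[2:], so the included tasks run next and the
+    # cursor position is preserved.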
+ def _insert_tasks_into_state(self, state, task_list):
+ # if we've failed at all, or if the task list is empty, just return the current state
+ if (state.fail_state != FailedStates.NONE and state.run_state == IteratingStates.TASKS) or not task_list:
+ return state
+
+ if state.run_state == IteratingStates.TASKS:
+ if state.tasks_child_state:
+ state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
+ else:
+ target_block = state._blocks[state.cur_block].copy()
+ before = target_block.block[:state.cur_regular_task]
+ after = target_block.block[state.cur_regular_task:]
+ target_block.block = before + task_list + after
+ state._blocks[state.cur_block] = target_block
+ elif state.run_state == IteratingStates.RESCUE:
+ if state.rescue_child_state:
+ state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
+ else:
+ target_block = state._blocks[state.cur_block].copy()
+ before = target_block.rescue[:state.cur_rescue_task]
+ after = target_block.rescue[state.cur_rescue_task:]
+ target_block.rescue = before + task_list + after
+ state._blocks[state.cur_block] = target_block
+ elif state.run_state == IteratingStates.ALWAYS:
+ if state.always_child_state:
+ state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
+ else:
+ target_block = state._blocks[state.cur_block].copy()
+ before = target_block.always[:state.cur_always_task]
+ after = target_block.always[state.cur_always_task:]
+ target_block.always = before + task_list + after
+ state._blocks[state.cur_block] = target_block
+ elif state.run_state == IteratingStates.HANDLERS:
+ state.handlers[state.cur_handlers_task:state.cur_handlers_task] = [h for b in task_list for h in b.block]
+
+ return state
+
+ def add_tasks(self, host, task_list):
+ self.set_state_for_host(host.name, self._insert_tasks_into_state(self.get_host_state(host), task_list))
+
+ @property
+ def host_states(self):
+ return self._host_states
+
+ def get_state_for_host(self, hostname: str) -> HostState:
+ return self._host_states[hostname]
+
+ def set_state_for_host(self, hostname: str, state: HostState) -> None:
+ if not isinstance(state, HostState):
+ raise AnsibleAssertionError('Expected state to be a HostState but was a %s' % type(state))
+ self._host_states[hostname] = state
+
+ def set_run_state_for_host(self, hostname: str, run_state: IteratingStates) -> None:
+ if not isinstance(run_state, IteratingStates):
+ raise AnsibleAssertionError('Expected run_state to be a IteratingStates but was %s' % (type(run_state)))
+ self._host_states[hostname].run_state = run_state
+
+ def set_fail_state_for_host(self, hostname: str, fail_state: FailedStates) -> None:
+ if not isinstance(fail_state, FailedStates):
+ raise AnsibleAssertionError('Expected fail_state to be a FailedStates but was %s' % (type(fail_state)))
+ self._host_states[hostname].fail_state = fail_state
diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py
new file mode 100644
index 0000000..e8b2a3d
--- /dev/null
+++ b/lib/ansible/executor/playbook_executor.py
@@ -0,0 +1,335 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible import context
+from ansible.executor.task_queue_manager import TaskQueueManager, AnsibleEndPlay
+from ansible.module_utils._text import to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.loader import become_loader, connection_loader, shell_loader
+from ansible.playbook import Playbook
+from ansible.template import Templar
+from ansible.utils.helpers import pct_to_int
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
+from ansible.utils.path import makedirs_safe
+from ansible.utils.ssh_functions import set_default_transport
+from ansible.utils.display import Display
+
+
+display = Display()
+
+
+class PlaybookExecutor:
+
+ '''
+ This is the primary class for executing playbooks, and thus the
+ basis for bin/ansible-playbook operation.
+ '''
+
+ def __init__(self, playbooks, inventory, variable_manager, loader, passwords):
+ self._playbooks = playbooks
+ self._inventory = inventory
+ self._variable_manager = variable_manager
+ self._loader = loader
+ self.passwords = passwords
+ self._unreachable_hosts = dict()
+
+ if context.CLIARGS.get('listhosts') or context.CLIARGS.get('listtasks') or \
+ context.CLIARGS.get('listtags') or context.CLIARGS.get('syntax'):
+ self._tqm = None
+ else:
+ self._tqm = TaskQueueManager(
+ inventory=inventory,
+ variable_manager=variable_manager,
+ loader=loader,
+ passwords=self.passwords,
+ forks=context.CLIARGS.get('forks'),
+ )
+
+ # Note: We run this here to cache whether the default ansible ssh
+ # executable supports control persist. Sometime in the future we may
+ # need to enhance this to check that ansible_ssh_executable specified
+ # in inventory is also cached. We can't do this caching at the point
+ # where it is used (in task_executor) because that is post-fork and
+ # therefore would be discarded after every task.
+ set_default_transport()
+
+ def run(self):
+ '''
+ Run the given playbook, based on the settings in the play which
+ may limit the runs to serialized groups, etc.
+ '''
+
+ result = 0
+ entrylist = []
+ entry = {}
+ try:
+ # preload become/connection/shell to set config defs cached
+ list(connection_loader.all(class_only=True))
+ list(shell_loader.all(class_only=True))
+ list(become_loader.all(class_only=True))
+
+ for playbook in self._playbooks:
+
+ # deal with FQCN
+ resource = _get_collection_playbook_path(playbook)
+ if resource is not None:
+ playbook_path = resource[1]
+ playbook_collection = resource[2]
+ else:
+ playbook_path = playbook
+                    # not an FQCN, but it might still be a collection playbook
+ playbook_collection = _get_collection_name_from_path(playbook)
+
+ if playbook_collection:
+ display.warning("running playbook inside collection {0}".format(playbook_collection))
+ AnsibleCollectionConfig.default_collection = playbook_collection
+ else:
+ AnsibleCollectionConfig.default_collection = None
+
+ pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
+ # FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
+
+ if self._tqm is None: # we are doing a listing
+ entry = {'playbook': playbook_path}
+ entry['plays'] = []
+ else:
+ # make sure the tqm has callbacks loaded
+ self._tqm.load_callbacks()
+ self._tqm.send_callback('v2_playbook_on_start', pb)
+
+ i = 1
+ plays = pb.get_plays()
+ display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))
+
+ for play in plays:
+ if play._included_path is not None:
+ self._loader.set_basedir(play._included_path)
+ else:
+ self._loader.set_basedir(pb._basedir)
+
+ # clear any filters which may have been applied to the inventory
+ self._inventory.remove_restriction()
+
+ # Allow variables to be used in vars_prompt fields.
+ all_vars = self._variable_manager.get_vars(play=play)
+ templar = Templar(loader=self._loader, variables=all_vars)
+ setattr(play, 'vars_prompt', templar.template(play.vars_prompt))
+
+ # FIXME: this should be a play 'sub object' like loop_control
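+                # For example (illustrative), a play might define:
+                #   vars_prompt:
+                #     - name: release
+                #       prompt: "Release tag to deploy?"
+                #       private: false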
+ if play.vars_prompt:
+ for var in play.vars_prompt:
+ vname = var['name']
+ prompt = var.get("prompt", vname)
+ default = var.get("default", None)
+ private = boolean(var.get("private", True))
+ confirm = boolean(var.get("confirm", False))
+ encrypt = var.get("encrypt", None)
+ salt_size = var.get("salt_size", None)
+ salt = var.get("salt", None)
+ unsafe = var.get("unsafe", None)
+
+ if vname not in self._variable_manager.extra_vars:
+ if self._tqm:
+ self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt,
+ default, unsafe)
+ play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
+ else: # we are either in --list-<option> or syntax check
+ play.vars[vname] = default
+
+ # Post validate so any play level variables are templated
+ all_vars = self._variable_manager.get_vars(play=play)
+ templar = Templar(loader=self._loader, variables=all_vars)
+ play.post_validate(templar)
+
+ if context.CLIARGS['syntax']:
+ continue
+
+ if self._tqm is None:
+ # we are just doing a listing
+ entry['plays'].append(play)
+
+ else:
+ self._tqm._unreachable_hosts.update(self._unreachable_hosts)
+
+ previously_failed = len(self._tqm._failed_hosts)
+ previously_unreachable = len(self._tqm._unreachable_hosts)
+
+ break_play = False
+ # we are actually running plays
+ batches = self._get_serialized_batches(play)
+ if len(batches) == 0:
+ self._tqm.send_callback('v2_playbook_on_play_start', play)
+ self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
+ for batch in batches:
+ # restrict the inventory to the hosts in the serialized batch
+ self._inventory.restrict_to_hosts(batch)
+ # and run it...
+ try:
+ result = self._tqm.run(play=play)
+ except AnsibleEndPlay as e:
+ result = e.result
+ break
+
+ # break the play if the result equals the special return code
+ if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
+ result = self._tqm.RUN_FAILED_HOSTS
+ break_play = True
+
+ # check the number of failures here, to see if they're above the maximum
+ # failure percentage allowed, or if any errors are fatal. If either of those
+ # conditions are met, we break out, otherwise we only break out if the entire
+ # batch failed
+ failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
+ (previously_failed + previously_unreachable)
+
+ if len(batch) == failed_hosts_count:
+ break_play = True
+ break
+
+ # update the previous counts so they don't accumulate incorrectly
+ # over multiple serial batches
+ previously_failed += len(self._tqm._failed_hosts) - previously_failed
+ previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable
+
+ # save the unreachable hosts from this batch
+ self._unreachable_hosts.update(self._tqm._unreachable_hosts)
+
+ if break_play:
+ break
+
+ i = i + 1 # per play
+
+ if entry:
+ entrylist.append(entry) # per playbook
+
+ # send the stats callback for this playbook
+ if self._tqm is not None:
+ if C.RETRY_FILES_ENABLED:
+ retries = set(self._tqm._failed_hosts.keys())
+ retries.update(self._tqm._unreachable_hosts.keys())
+ retries = sorted(retries)
+ if len(retries) > 0:
+ if C.RETRY_FILES_SAVE_PATH:
+ basedir = C.RETRY_FILES_SAVE_PATH
+ elif playbook_path:
+ basedir = os.path.dirname(os.path.abspath(playbook_path))
+ else:
+ basedir = '~/'
+
+ (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
+ filename = os.path.join(basedir, "%s.retry" % retry_name)
+ if self._generate_retry_inventory(filename, retries):
+ display.display("\tto retry, use: --limit @%s\n" % filename)
+
+ self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
+
+ # if the last result wasn't zero, break out of the playbook file name loop
+ if result != 0:
+ break
+
+ if entrylist:
+ return entrylist
+
+ finally:
+ if self._tqm is not None:
+ self._tqm.cleanup()
+ if self._loader:
+ self._loader.cleanup_all_tmp_files()
+
+ if context.CLIARGS['syntax']:
+ display.display("No issues encountered")
+ return result
+
+ if context.CLIARGS['start_at_task'] and not self._tqm._start_at_done:
+ display.error(
+ "No matching task \"%s\" found."
+ " Note: --start-at-task can only follow static includes."
+ % context.CLIARGS['start_at_task']
+ )
+
+ return result
+
+ def _get_serialized_batches(self, play):
+ '''
+ Returns a list of hosts, subdivided into batches based on
+ the serial size specified in the play.
+ '''
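+        # For example (illustrative): serial of [1, '30%'] with 10 matched hosts
+        # yields batches of sizes [1, 3, 3, 3]; the last serial entry is reused
+        # until every host has been placed in a batch.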
+
+ # make sure we have a unique list of hosts
+ all_hosts = self._inventory.get_hosts(play.hosts, order=play.order)
+ all_hosts_len = len(all_hosts)
+
+ # the serial value can be listed as a scalar or a list of
+ # scalars, so we make sure it's a list here
+ serial_batch_list = play.serial
+ if len(serial_batch_list) == 0:
+ serial_batch_list = [-1]
+
+ cur_item = 0
+ serialized_batches = []
+
+ while len(all_hosts) > 0:
+ # get the serial value from current item in the list
+ serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)
+
+ # if the serial count was not specified or is invalid, default to
+ # a list of all hosts, otherwise grab a chunk of the hosts equal
+ # to the current serial item size
+ if serial <= 0:
+ serialized_batches.append(all_hosts)
+ break
+ else:
+ play_hosts = []
+ for x in range(serial):
+ if len(all_hosts) > 0:
+ play_hosts.append(all_hosts.pop(0))
+
+ serialized_batches.append(play_hosts)
+
+ # increment the current batch list item number, and if we've hit
+ # the end keep using the last element until we've consumed all of
+ # the hosts in the inventory
+ cur_item += 1
+ if cur_item > len(serial_batch_list) - 1:
+ cur_item = len(serial_batch_list) - 1
+
+ return serialized_batches
+
+ def _generate_retry_inventory(self, retry_path, replay_hosts):
+ '''
+ Called when a playbook run fails. It generates an inventory which allows
+ re-running on ONLY the failed hosts. This may duplicate some variable
+ information in group_vars/host_vars but that is ok, and expected.
+ '''
+ try:
+ makedirs_safe(os.path.dirname(retry_path))
+ with open(retry_path, 'w') as fd:
+ for x in replay_hosts:
+ fd.write("%s\n" % x)
+ except Exception as e:
+ display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_text(e)))
+ return False
+
+ return True
diff --git a/lib/ansible/executor/powershell/__init__.py b/lib/ansible/executor/powershell/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/executor/powershell/__init__.py
diff --git a/lib/ansible/executor/powershell/async_watchdog.ps1 b/lib/ansible/executor/powershell/async_watchdog.ps1
new file mode 100644
index 0000000..c2138e3
--- /dev/null
+++ b/lib/ansible/executor/powershell/async_watchdog.ps1
@@ -0,0 +1,117 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
+)
+
+# help with debugging errors as we don't have visibility of this running process
+trap {
+ $watchdog_path = "$($env:TEMP)\ansible-async-watchdog-error-$(Get-Date -Format "yyyy-MM-ddTHH-mm-ss.ffffZ").txt"
+ $error_msg = "Error while running the async exec wrapper`r`n$(Format-AnsibleException -ErrorRecord $_)"
+ Set-Content -Path $watchdog_path -Value $error_msg
+ break
+}
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting async_watchdog" "async_watchdog"
+
+$actions = $Payload.actions
+
+# pop the 0th action as the entrypoint; the remaining actions are handed on in the payload
+$entrypoint = $payload.($actions[0])
+$payload.actions = $payload.actions[1..99]
+
+$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($entrypoint))
+
+$resultfile_path = $payload.async_results_path
+$max_exec_time_sec = $payload.async_timeout_sec
+
+Write-AnsibleLog "INFO - deserializing existing result file args at: '$resultfile_path'" "async_watchdog"
+if (-not (Test-Path -Path $resultfile_path)) {
+ $msg = "result file at '$resultfile_path' does not exist"
+ Write-AnsibleLog "ERROR - $msg" "async_watchdog"
+ throw $msg
+}
+$result_json = Get-Content -Path $resultfile_path -Raw
+Write-AnsibleLog "INFO - result file json is: $result_json" "async_watchdog"
+$result = ConvertFrom-AnsibleJson -InputObject $result_json
+
+Write-AnsibleLog "INFO - creating async runspace" "async_watchdog"
+$rs = [RunspaceFactory]::CreateRunspace()
+$rs.Open()
+
+Write-AnsibleLog "INFO - creating async PowerShell pipeline" "async_watchdog"
+$ps = [PowerShell]::Create()
+$ps.Runspace = $rs
+
+# these functions are set in exec_wrapper
+Write-AnsibleLog "INFO - adding global functions to PowerShell pipeline script" "async_watchdog"
+$ps.AddScript($script:common_functions).AddStatement() > $null
+$ps.AddScript($script:wrapper_functions).AddStatement() > $null
+$function_params = @{
+ Name = "common_functions"
+ Value = $script:common_functions
+ Scope = "script"
+}
+$ps.AddCommand("Set-Variable").AddParameters($function_params).AddStatement() > $null
+
+Write-AnsibleLog "INFO - adding $($actions[0]) to PowerShell pipeline script" "async_watchdog"
+$ps.AddScript($entrypoint).AddArgument($payload) > $null
+
+Write-AnsibleLog "INFO - async job start, calling BeginInvoke()" "async_watchdog"
+$job_async_result = $ps.BeginInvoke()
+
+Write-AnsibleLog "INFO - waiting '$max_exec_time_sec' seconds for async job to complete" "async_watchdog"
+$job_async_result.AsyncWaitHandle.WaitOne($max_exec_time_sec * 1000) > $null
+$result.finished = 1
+
+if ($job_async_result.IsCompleted) {
+ Write-AnsibleLog "INFO - async job completed, calling EndInvoke()" "async_watchdog"
+
+ $job_output = $ps.EndInvoke($job_async_result)
+ $job_error = $ps.Streams.Error
+
+ Write-AnsibleLog "INFO - raw module stdout:`r`n$($job_output | Out-String)" "async_watchdog"
+ if ($job_error) {
+ Write-AnsibleLog "WARN - raw module stderr:`r`n$($job_error | Out-String)" "async_watchdog"
+ }
+
+ # write success/output/error to result object
+ # TODO: cleanse leading/trailing junk
+ try {
+ Write-AnsibleLog "INFO - deserializing Ansible stdout" "async_watchdog"
+ $module_result = ConvertFrom-AnsibleJson -InputObject $job_output
+ # TODO: check for conflicting keys
+ $result = $result + $module_result
+ }
+ catch {
+ $result.failed = $true
+ $result.msg = "failed to parse module output: $($_.Exception.Message)"
+ # return output back to Ansible to help with debugging errors
+ $result.stdout = $job_output | Out-String
+ $result.stderr = $job_error | Out-String
+ }
+
+ $result_json = ConvertTo-Json -InputObject $result -Depth 99 -Compress
+ Set-Content -Path $resultfile_path -Value $result_json
+
+ Write-AnsibleLog "INFO - wrote output to $resultfile_path" "async_watchdog"
+}
+else {
+ Write-AnsibleLog "ERROR - reached timeout on async job, stopping job" "async_watchdog"
+ $ps.BeginStop($null, $null) > $null # best effort stop
+
+ # write timeout to result object
+ $result.failed = $true
+ $result.msg = "timed out waiting for module completion"
+ $result_json = ConvertTo-Json -InputObject $result -Depth 99 -Compress
+ Set-Content -Path $resultfile_path -Value $result_json
+
+ Write-AnsibleLog "INFO - wrote timeout to '$resultfile_path'" "async_watchdog"
+}
+
+# in the case of a hung pipeline, this will cause the process to stay alive until it's un-hung...
+#$rs.Close() | Out-Null
+
+Write-AnsibleLog "INFO - ending async_watchdog" "async_watchdog"
diff --git a/lib/ansible/executor/powershell/async_wrapper.ps1 b/lib/ansible/executor/powershell/async_wrapper.ps1
new file mode 100644
index 0000000..0cd640f
--- /dev/null
+++ b/lib/ansible/executor/powershell/async_wrapper.ps1
@@ -0,0 +1,174 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
+)
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting async_wrapper" "async_wrapper"
+
+if (-not $Payload.environment.ContainsKey("ANSIBLE_ASYNC_DIR")) {
+ Write-AnsibleError -Message "internal error: the environment variable ANSIBLE_ASYNC_DIR is not set and is required for an async task"
+ $host.SetShouldExit(1)
+ return
+}
+$async_dir = [System.Environment]::ExpandEnvironmentVariables($Payload.environment.ANSIBLE_ASYNC_DIR)
+
+# calculate the result path so we can include it in the worker payload
+$jid = $Payload.async_jid
+$local_jid = $jid + "." + $pid
+
+$results_path = [System.IO.Path]::Combine($async_dir, $local_jid)
+
+Write-AnsibleLog "INFO - creating async results path at '$results_path'" "async_wrapper"
+
+$Payload.async_results_path = $results_path
+[System.IO.Directory]::CreateDirectory([System.IO.Path]::GetDirectoryName($results_path)) > $null
+
+# we use Win32_Process to escape the current process job, CreateProcess with a
+# breakaway flag won't work for psrp as the psrp process does not have breakaway
+# rights. Unfortunately we can't read/write to the spawned process as we can't
+# inherit the handles. We use a locked down named pipe to send the exec_wrapper
+# payload. Anonymous pipes won't work as the spawned process will not be a child
+# of the current one and will not be able to inherit the handles
+
+# pop the async_wrapper action so we don't get stuck in a loop and create new
+# exec_wrapper for our async process
+$Payload.actions = $Payload.actions[1..99]
+$payload_json = ConvertTo-Json -InputObject $Payload -Depth 99 -Compress
+
+# append the manifest JSON to the exec_wrapper, delimited with null chars so
+# the spawned process can split the script from its payload
+$exec_wrapper = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.exec_wrapper))
+$exec_wrapper += "`0`0`0`0" + $payload_json
+$payload_bytes = [System.Text.Encoding]::UTF8.GetBytes($exec_wrapper)
+$pipe_name = "ansible-async-$jid-$([guid]::NewGuid())"
+
+# template the async process command line with the payload details
+$bootstrap_wrapper = {
+ # help with debugging errors as we lose visibility of the process output
+ # from here on
+ trap {
+ $wrapper_path = "$($env:TEMP)\ansible-async-wrapper-error-$(Get-Date -Format "yyyy-MM-ddTHH-mm-ss.ffffZ").txt"
+ $error_msg = "Error while running the async exec wrapper`r`n$($_ | Out-String)`r`n$($_.ScriptStackTrace)"
+ Set-Content -Path $wrapper_path -Value $error_msg
+ break
+ }
+
+ &chcp.com 65001 > $null
+
+ # store the pipe name and no. of bytes to read, these are populated
+ # before the process is created - do not remove or change
+ $pipe_name = ""
+ $bytes_length = 0
+
+ $input_bytes = New-Object -TypeName byte[] -ArgumentList $bytes_length
+ $pipe = New-Object -TypeName System.IO.Pipes.NamedPipeClientStream -ArgumentList @(
+ ".", # localhost
+ $pipe_name,
+ [System.IO.Pipes.PipeDirection]::In,
+ [System.IO.Pipes.PipeOptions]::None,
+ [System.Security.Principal.TokenImpersonationLevel]::Anonymous
+ )
+ try {
+ $pipe.Connect()
+ $pipe.Read($input_bytes, 0, $bytes_length) > $null
+ }
+ finally {
+ $pipe.Close()
+ }
+ $exec = [System.Text.Encoding]::UTF8.GetString($input_bytes)
+ $exec_parts = $exec.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
+ Set-Variable -Name json_raw -Value $exec_parts[1]
+ $exec = [ScriptBlock]::Create($exec_parts[0])
+ &$exec
+}
+
+$bootstrap_wrapper = $bootstrap_wrapper.ToString().Replace('$pipe_name = ""', "`$pipe_name = `"$pipe_name`"")
+$bootstrap_wrapper = $bootstrap_wrapper.Replace('$bytes_length = 0', "`$bytes_length = $($payload_bytes.Count)")
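+# e.g. after templating, the bootstrap contains lines like (illustrative values):
+#   $pipe_name = "ansible-async-123456789012-0b1c2d3e-1111-2222-3333-444455556666"
+#   $bytes_length = 4096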
+$encoded_command = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($bootstrap_wrapper))
+$pwsh_path = "$env:SystemRoot\System32\WindowsPowerShell\v1.0\powershell.exe"
+$exec_args = "`"$pwsh_path`" -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $encoded_command"
+
+# create a named pipe that is set to allow only the current user read access
+$current_user = ([Security.Principal.WindowsIdentity]::GetCurrent()).User
+$pipe_sec = New-Object -TypeName System.IO.Pipes.PipeSecurity
+$pipe_ar = New-Object -TypeName System.IO.Pipes.PipeAccessRule -ArgumentList @(
+ $current_user,
+ [System.IO.Pipes.PipeAccessRights]::Read,
+ [System.Security.AccessControl.AccessControlType]::Allow
+)
+$pipe_sec.AddAccessRule($pipe_ar)
+
+Write-AnsibleLog "INFO - creating named pipe '$pipe_name'" "async_wrapper"
+$pipe = New-Object -TypeName System.IO.Pipes.NamedPipeServerStream -ArgumentList @(
+ $pipe_name,
+ [System.IO.Pipes.PipeDirection]::Out,
+ 1,
+ [System.IO.Pipes.PipeTransmissionMode]::Byte,
+ [System.IO.Pipes.PipeOptions]::Asynchronous,
+ 0,
+ 0,
+ $pipe_sec
+)
+
+try {
+ Write-AnsibleLog "INFO - creating async process '$exec_args'" "async_wrapper"
+ $process = Invoke-CimMethod -ClassName Win32_Process -Name Create -Arguments @{CommandLine = $exec_args }
+ $rc = $process.ReturnValue
+
+ Write-AnsibleLog "INFO - return value from async process exec: $rc" "async_wrapper"
+ if ($rc -ne 0) {
+ $error_msg = switch ($rc) {
+ 2 { "Access denied" }
+ 3 { "Insufficient privilege" }
+ 8 { "Unknown failure" }
+ 9 { "Path not found" }
+ 21 { "Invalid parameter" }
+ default { "Other" }
+ }
+ throw "Failed to start async process: $rc ($error_msg)"
+ }
+ $watchdog_pid = $process.ProcessId
+ Write-AnsibleLog "INFO - created async process PID: $watchdog_pid" "async_wrapper"
+
+ # populate initial results before we send the async data to avoid result race
+ $result = @{
+ started = 1;
+ finished = 0;
+ results_file = $results_path;
+ ansible_job_id = $local_jid;
+ _ansible_suppress_tmpdir_delete = $true;
+ ansible_async_watchdog_pid = $watchdog_pid
+ }
+
+ Write-AnsibleLog "INFO - writing initial async results to '$results_path'" "async_wrapper"
+ $result_json = ConvertTo-Json -InputObject $result -Depth 99 -Compress
+ Set-Content $results_path -Value $result_json
+
+ $np_timeout = $Payload.async_startup_timeout * 1000
+ Write-AnsibleLog "INFO - waiting for async process to connect to named pipe for $np_timeout milliseconds" "async_wrapper"
+ $wait_async = $pipe.BeginWaitForConnection($null, $null)
+ $wait_async.AsyncWaitHandle.WaitOne($np_timeout) > $null
+ if (-not $wait_async.IsCompleted) {
+ $msg = "Ansible encountered a timeout while waiting for the async task to start and connect to the named"
+ $msg += "pipe. This can be affected by the performance of the target - you can increase this timeout using"
+ $msg += "WIN_ASYNC_STARTUP_TIMEOUT or just for this host using the win_async_startup_timeout hostvar if "
+ $msg += "this keeps happening."
+ throw $msg
+ }
+ $pipe.EndWaitForConnection($wait_async)
+
+ Write-AnsibleLog "INFO - writing exec_wrapper and payload to async process" "async_wrapper"
+ $pipe.Write($payload_bytes, 0, $payload_bytes.Count)
+ $pipe.Flush()
+ $pipe.WaitForPipeDrain()
+}
+finally {
+ $pipe.Close()
+}
+
+Write-AnsibleLog "INFO - outputting initial async result: $result_json" "async_wrapper"
+Write-Output -InputObject $result_json
+Write-AnsibleLog "INFO - ending async_wrapper" "async_wrapper"
diff --git a/lib/ansible/executor/powershell/become_wrapper.ps1 b/lib/ansible/executor/powershell/become_wrapper.ps1
new file mode 100644
index 0000000..f40e265
--- /dev/null
+++ b/lib/ansible/executor/powershell/become_wrapper.ps1
@@ -0,0 +1,163 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
+)
+
+#Requires -Module Ansible.ModuleUtils.AddType
+#AnsibleRequires -CSharpUtil Ansible.AccessToken
+#AnsibleRequires -CSharpUtil Ansible.Become
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting become_wrapper" "become_wrapper"
+
+Function Get-EnumValue($enum, $flag_type, $value) {
+ $raw_enum_value = $value.Replace('_', '')
+ try {
+ $enum_value = [Enum]::Parse($enum, $raw_enum_value, $true)
+ }
+ catch [System.ArgumentException] {
+ $valid_options = [Enum]::GetNames($enum) | ForEach-Object -Process {
+ (($_ -creplace "(.)([A-Z][a-z]+)", '$1_$2') -creplace "([a-z0-9])([A-Z])", '$1_$2').ToString().ToLower()
+ }
+ throw "become_flags $flag_type value '$value' is not valid, valid values are: $($valid_options -join ", ")"
+ }
+ return $enum_value
+}
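+# usage sketch (illustrative): Get-EnumValue -enum ([Ansible.AccessToken.LogonType]) -flag_type "logon_type" -value "new_credentials"
+# returns [Ansible.AccessToken.LogonType]::NewCredentials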
+
+Function Get-BecomeFlag($flags) {
+ $logon_type = [Ansible.AccessToken.LogonType]::Interactive
+ $logon_flags = [Ansible.Become.LogonFlags]::WithProfile
+
+ if ($null -eq $flags -or $flags -eq "") {
+ $flag_split = @()
+ }
+ elseif ($flags -is [string]) {
+ $flag_split = $flags.Split(" ")
+ }
+ else {
+ throw "become_flags must be a string, was $($flags.GetType())"
+ }
+
+ foreach ($flag in $flag_split) {
+ $split = $flag.Split("=")
+ if ($split.Count -ne 2) {
+ throw "become_flags entry '$flag' is in an invalid format, must be a key=value pair"
+ }
+ $flag_key = $split[0]
+ $flag_value = $split[1]
+ if ($flag_key -eq "logon_type") {
+ $enum_details = @{
+ enum = [Ansible.AccessToken.LogonType]
+ flag_type = $flag_key
+ value = $flag_value
+ }
+ $logon_type = Get-EnumValue @enum_details
+ }
+ elseif ($flag_key -eq "logon_flags") {
+ $logon_flag_values = $flag_value.Split(",")
+ $logon_flags = 0 -as [Ansible.Become.LogonFlags]
+ foreach ($logon_flag_value in $logon_flag_values) {
+ if ($logon_flag_value -eq "") {
+ continue
+ }
+ $enum_details = @{
+ enum = [Ansible.Become.LogonFlags]
+ flag_type = $flag_key
+ value = $logon_flag_value
+ }
+ $logon_flag = Get-EnumValue @enum_details
+ $logon_flags = $logon_flags -bor $logon_flag
+ }
+ }
+ else {
+ throw "become_flags key '$flag_key' is not a valid runas flag, must be 'logon_type' or 'logon_flags'"
+ }
+ }
+
+ return $logon_type, [Ansible.Become.LogonFlags]$logon_flags
+}
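+# e.g. (illustrative) Get-BecomeFlag -flags "logon_type=batch logon_flags=with_profile"
+# returns ([Ansible.AccessToken.LogonType]::Batch, [Ansible.Become.LogonFlags]::WithProfile)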
+
+Write-AnsibleLog "INFO - loading C# become code" "become_wrapper"
+$add_type_b64 = $Payload.powershell_modules["Ansible.ModuleUtils.AddType"]
+$add_type = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($add_type_b64))
+New-Module -Name Ansible.ModuleUtils.AddType -ScriptBlock ([ScriptBlock]::Create($add_type)) | Import-Module > $null
+
+$new_tmp = [System.Environment]::ExpandEnvironmentVariables($Payload.module_args["_ansible_remote_tmp"])
+$access_def = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils["Ansible.AccessToken"]))
+$become_def = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils["Ansible.Become"]))
+$process_def = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils["Ansible.Process"]))
+Add-CSharpType -References $access_def, $become_def, $process_def -TempPath $new_tmp -IncludeDebugInfo
+
+$username = $Payload.become_user
+$password = $Payload.become_password
+# We need to set password to the value of NullString so a null password is preserved when crossing the .NET
+# boundary. If we pass $null it will automatically be converted to "" and we need to keep the distinction for
+# accounts that don't have a password and when someone wants to become without knowing the password.
+if ($null -eq $password) {
+ $password = [NullString]::Value
+}
+
+try {
+ $logon_type, $logon_flags = Get-BecomeFlag -flags $Payload.become_flags
+}
+catch {
+ Write-AnsibleError -Message "internal error: failed to parse become_flags '$($Payload.become_flags)'" -ErrorRecord $_
+ $host.SetShouldExit(1)
+ return
+}
+Write-AnsibleLog "INFO - parsed become input, user: '$username', type: '$logon_type', flags: '$logon_flags'" "become_wrapper"
+
+# NB: CreateProcessWithTokenW commandline maxes out at 1024 chars, must
+# bootstrap via small wrapper which contains the exec_wrapper passed through the
+# stdin pipe. Cannot use 'powershell -' as the $ErrorActionPreference is always
+# set to Stop and cannot be changed. Also need to split the payload from the wrapper to prevent potentially
+# sensitive content from being logged by the scriptblock logger.
+$bootstrap_wrapper = {
+ &chcp.com 65001 > $null
+ $exec_wrapper_str = [System.Console]::In.ReadToEnd()
+ $split_parts = $exec_wrapper_str.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
+ Set-Variable -Name json_raw -Value $split_parts[1]
+ $exec_wrapper = [ScriptBlock]::Create($split_parts[0])
+ &$exec_wrapper
+}
+$exec_command = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($bootstrap_wrapper.ToString()))
+$lp_command_line = "powershell.exe -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $exec_command"
+$lp_current_directory = $env:SystemRoot # TODO: should this be set to the become user's profile dir?
+
+# pop the become_wrapper action so we don't get stuck in a loop
+$Payload.actions = $Payload.actions[1..99]
+# we want the output from the exec_wrapper to be base64 encoded to preserve unicode chars
+$Payload.encoded_output = $true
+
+$payload_json = ConvertTo-Json -InputObject $Payload -Depth 99 -Compress
+# delimit the payload JSON from the wrapper to keep sensitive contents out of scriptblocks (which can be logged)
+$exec_wrapper = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.exec_wrapper))
+$exec_wrapper += "`0`0`0`0" + $payload_json
+
+try {
+ Write-AnsibleLog "INFO - starting become process '$lp_command_line'" "become_wrapper"
+ $result = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($username, $password, $logon_flags, $logon_type,
+ $null, $lp_command_line, $lp_current_directory, $null, $exec_wrapper)
+ Write-AnsibleLog "INFO - become process complete with rc: $($result.ExitCode)" "become_wrapper"
+ $stdout = $result.StandardOut
+ try {
+ $stdout = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($stdout))
+ }
+ catch [FormatException] {
+ # output wasn't Base64, ignore as it may contain an error message we want to pass to Ansible
+ Write-AnsibleLog "WARN - become process stdout was not base64 encoded as expected: $stdout"
+ }
+
+ $host.UI.WriteLine($stdout)
+ $host.UI.WriteErrorLine($result.StandardError.Trim())
+ $host.SetShouldExit($result.ExitCode)
+}
+catch {
+ Write-AnsibleError -Message "internal error: failed to become user '$username'" -ErrorRecord $_
+ $host.SetShouldExit(1)
+}
+
+Write-AnsibleLog "INFO - ending become_wrapper" "become_wrapper"
diff --git a/lib/ansible/executor/powershell/bootstrap_wrapper.ps1 b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
new file mode 100644
index 0000000..cdba80c
--- /dev/null
+++ b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
@@ -0,0 +1,13 @@
+&chcp.com 65001 > $null
+
+if ($PSVersionTable.PSVersion -lt [Version]"3.0") {
+ '{"failed":true,"msg":"Ansible requires PowerShell v3.0 or newer"}'
+ exit 1
+}
+
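+# stdin carries "<exec_wrapper script>`0`0`0`0<manifest json>"; split on the
+# null delimiter to recover the script and its payload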
+$exec_wrapper_str = $input | Out-String
+$split_parts = $exec_wrapper_str.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
+if ($split_parts.Length -ne 2) { throw "invalid payload" }
+Set-Variable -Name json_raw -Value $split_parts[1]
+$exec_wrapper = [ScriptBlock]::Create($split_parts[0])
+&$exec_wrapper
diff --git a/lib/ansible/executor/powershell/coverage_wrapper.ps1 b/lib/ansible/executor/powershell/coverage_wrapper.ps1
new file mode 100644
index 0000000..26cbe66
--- /dev/null
+++ b/lib/ansible/executor/powershell/coverage_wrapper.ps1
@@ -0,0 +1,199 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
+)
+
+#AnsibleRequires -Wrapper module_wrapper
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting coverage_wrapper" "coverage_wrapper"
+
+# Required to be set for psrp so we can set a breakpoint in the remote runspace
+if ($PSVersionTable.PSVersion -ge [Version]'4.0') {
+ $host.Runspace.Debugger.SetDebugMode([System.Management.Automation.DebugModes]::RemoteScript)
+}
+
+Function New-CoverageBreakpoint {
+ Param (
+ [String]$Path,
+ [ScriptBlock]$Code,
+ [String]$AnsiblePath
+ )
+
+ # It is quicker to pass in the code as a string instead of calling ParseFile as we already know the contents
+ $predicate = {
+ $args[0] -is [System.Management.Automation.Language.CommandBaseAst]
+ }
+ $script_cmds = $Code.Ast.FindAll($predicate, $true)
+
+ # Create an object that tracks the Ansible path of the file and the breakpoints that have been set in it
+ $info = [PSCustomObject]@{
+ Path = $AnsiblePath
+ Breakpoints = [System.Collections.Generic.List`1[System.Management.Automation.Breakpoint]]@()
+ }
+
+ # Keep track of lines that are already scanned. PowerShell can contain multiple commands on one line
+ $scanned_lines = [System.Collections.Generic.HashSet`1[System.Int32]]@()
+ foreach ($cmd in $script_cmds) {
+ if (-not $scanned_lines.Add($cmd.Extent.StartLineNumber)) {
+ continue
+ }
+
+ # Do not add any -Action value, even if it is $null or {}. Doing so will balloon the runtime.
+ $params = @{
+ Script = $Path
+ Line = $cmd.Extent.StartLineNumber
+ Column = $cmd.Extent.StartColumnNumber
+ }
+ $info.Breakpoints.Add((Set-PSBreakpoint @params))
+ }
+
+ $info
+}
+
+Function Compare-PathFilterPattern {
+ Param (
+ [String[]]$Patterns,
+ [String]$Path
+ )
+
+ foreach ($pattern in $Patterns) {
+ if ($Path -like $pattern) {
+ return $true
+ }
+ }
+ return $false
+}
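+# usage sketch (illustrative): Compare-PathFilterPattern -Patterns @("*module_utils*") -Path $ansible_path
+# returns $true only when the path matches one of the -like wildcard patterns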
+
+$module_name = $Payload.module_args["_ansible_module_name"]
+Write-AnsibleLog "INFO - building coverage payload for '$module_name'" "coverage_wrapper"
+
+# A PS breakpoint needs an actual path to work properly, so we create a temp directory that stores the module and
+# module_util code during execution
+$temp_path = Join-Path -Path ([System.IO.Path]::GetTempPath()) -ChildPath "ansible-coverage-$([System.IO.Path]::GetRandomFileName())"
+Write-AnsibleLog "INFO - Creating temp path for coverage files '$temp_path'" "coverage_wrapper"
+New-Item -Path $temp_path -ItemType Directory > $null
+$breakpoint_info = [System.Collections.Generic.List`1[PSObject]]@()
+
+# Ensures we create files with UTF-8 encoding and a BOM. This is critical to force the powershell engine to read files
+# as UTF-8 and not as the system's codepage.
+$file_encoding = 'UTF8'
+
+try {
+ $scripts = [System.Collections.Generic.List`1[System.Object]]@($script:common_functions)
+
+ $coverage_path_filter = $Payload.coverage.path_filter.Split(":", [StringSplitOptions]::RemoveEmptyEntries)
+
+ # We need to track what utils have already been added to the script for loading. This is because the load
+ # order is important and can have module_utils that rely on other utils.
+ $loaded_utils = [System.Collections.Generic.HashSet`1[System.String]]@()
+ $parse_util = {
+ $util_name = $args[0]
+ if (-not $loaded_utils.Add($util_name)) {
+ return
+ }
+
+ $util_code = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.powershell_modules.$util_name))
+ $util_sb = [ScriptBlock]::Create($util_code)
+ $util_path = Join-Path -Path $temp_path -ChildPath "$($util_name).psm1"
+
+ Write-AnsibleLog "INFO - Outputting module_util $util_name to temp file '$util_path'" "coverage_wrapper"
+ Set-Content -LiteralPath $util_path -Value $util_code -Encoding $file_encoding
+
+ $ansible_path = $Payload.coverage.module_util_paths.$util_name
+ if ((Compare-PathFilterPattern -Patterns $coverage_path_filter -Path $ansible_path)) {
+ $cov_params = @{
+ Path = $util_path
+ Code = $util_sb
+ AnsiblePath = $ansible_path
+ }
+ $breakpoints = New-CoverageBreakpoint @cov_params
+ $breakpoint_info.Add($breakpoints)
+ }
+
+ if ($null -ne $util_sb.Ast.ScriptRequirements) {
+ foreach ($required_util in $util_sb.Ast.ScriptRequirements.RequiredModules) {
+ &$parse_util $required_util.Name
+ }
+ }
+ Write-AnsibleLog "INFO - Adding util $util_name to scripts to run" "coverage_wrapper"
+ $scripts.Add("Import-Module -Name '$util_path'")
+ }
+ foreach ($util in $Payload.powershell_modules.Keys) {
+ &$parse_util $util
+ }
+
+ $module = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.module_entry))
+ $module_path = Join-Path -Path $temp_path -ChildPath "$($module_name).ps1"
+ Write-AnsibleLog "INFO - Ouputting module $module_name to temp file '$module_path'" "coverage_wrapper"
+ Set-Content -LiteralPath $module_path -Value $module -Encoding $file_encoding
+ $scripts.Add($module_path)
+
+ $ansible_path = $Payload.coverage.module_path
+ if ((Compare-PathFilterPattern -Patterns $coverage_path_filter -Path $ansible_path)) {
+ $cov_params = @{
+ Path = $module_path
+ Code = [ScriptBlock]::Create($module)
+ AnsiblePath = $Payload.coverage.module_path
+ }
+ $breakpoints = New-CoverageBreakpoint @cov_params
+ $breakpoint_info.Add($breakpoints)
+ }
+
+ $variables = [System.Collections.ArrayList]@(@{ Name = "complex_args"; Value = $Payload.module_args; Scope = "Global" })
+ $entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($payload.module_wrapper))
+ $entrypoint = [ScriptBlock]::Create($entrypoint)
+
+ $params = @{
+ Scripts = $scripts
+ Variables = $variables
+ Environment = $Payload.environment
+ ModuleName = $module_name
+ }
+ if ($breakpoint_info) {
+ $params.Breakpoints = $breakpoint_info.Breakpoints
+ }
+
+ try {
+ &$entrypoint @params
+ }
+ finally {
+ # Processing here is kept to an absolute minimum to make sure each task runtime is kept as small as
+ # possible. Once all the tests have been run ansible-test will collect this info and process it locally in
+ # one go.
+ Write-AnsibleLog "INFO - Creating coverage result output" "coverage_wrapper"
+ $coverage_info = @{}
+ foreach ($info in $breakpoint_info) {
+ $coverage_info.($info.Path) = $info.Breakpoints | Select-Object -Property Line, HitCount
+ }
+
+ # The coverage.output value is a filename set by the Ansible controller. We append some more remote side
+ # info to the filename to make it unique and identify the remote host a bit more.
+ $ps_version = "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)"
+ $coverage_output_path = "$($Payload.coverage.output)=powershell-$ps_version=coverage.$($env:COMPUTERNAME).$PID.$(Get-Random)"
+ $code_cov_json = ConvertTo-Json -InputObject $coverage_info -Compress
+
+ Write-AnsibleLog "INFO - Outputting coverage json to '$coverage_output_path'" "coverage_wrapper"
+ # Ansible controller expects these files to be UTF-8 without a BOM, use .NET for this.
+ $utf8_no_bom = New-Object -TypeName System.Text.UTF8Encoding -ArgumentList $false
+ [System.IO.File]::WriteAllBytes($coverage_output_path, $utf8_no_bom.GetBytes($code_cov_json))
+ }
+}
+finally {
+ try {
+ if ($breakpoint_info) {
+ foreach ($b in $breakpoint_info.Breakpoints) {
+ Remove-PSBreakpoint -Breakpoint $b
+ }
+ }
+ }
+ finally {
+ Write-AnsibleLog "INFO - Remove temp coverage folder '$temp_path'" "coverage_wrapper"
+ Remove-Item -LiteralPath $temp_path -Force -Recurse
+ }
+}
+
+Write-AnsibleLog "INFO - ending coverage_wrapper" "coverage_wrapper"
diff --git a/lib/ansible/executor/powershell/exec_wrapper.ps1 b/lib/ansible/executor/powershell/exec_wrapper.ps1
new file mode 100644
index 0000000..0f97bdf
--- /dev/null
+++ b/lib/ansible/executor/powershell/exec_wrapper.ps1
@@ -0,0 +1,237 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+begin {
+ $DebugPreference = "Continue"
+ $ProgressPreference = "SilentlyContinue"
+ $ErrorActionPreference = "Stop"
+ Set-StrictMode -Version 2
+
+ # common functions that are loaded in exec and module context, this is set
+ # as a script scoped variable so async_watchdog and module_wrapper can
+ # access the functions when creating their Runspaces
+ $script:common_functions = {
+ Function ConvertFrom-AnsibleJson {
+ <#
+ .SYNOPSIS
+ Converts a JSON string to a Hashtable/Array in the fastest way
+ possible. Unfortunately ConvertFrom-Json is still faster, but it
+ outputs a PSCustomObject which is cumbersome for module consumption.
+
+ .PARAMETER InputObject
+ [String] The JSON string to deserialize.
+ #>
+ param(
+ [Parameter(Mandatory = $true, Position = 0)][String]$InputObject
+ )
+
+ # we can use -AsHashtable to get PowerShell to convert the JSON to
+ # a Hashtable and not a PSCustomObject. This was added in PowerShell
+ # 6.0; fall back to a manual conversion on older versions
+ $cmdlet = Get-Command -Name ConvertFrom-Json -CommandType Cmdlet
+ if ("AsHashtable" -in $cmdlet.Parameters.Keys) {
+ return , (ConvertFrom-Json -InputObject $InputObject -AsHashtable)
+ }
+ else {
+ # get the PSCustomObject and then manually convert from there
+ $raw_obj = ConvertFrom-Json -InputObject $InputObject
+
+ Function ConvertTo-Hashtable {
+ param($InputObject)
+
+ if ($null -eq $InputObject) {
+ return $null
+ }
+
+ if ($InputObject -is [PSCustomObject]) {
+ $new_value = @{}
+ foreach ($prop in $InputObject.PSObject.Properties.GetEnumerator()) {
+ $new_value.($prop.Name) = (ConvertTo-Hashtable -InputObject $prop.Value)
+ }
+ return , $new_value
+ }
+ elseif ($InputObject -is [Array]) {
+ $new_value = [System.Collections.ArrayList]@()
+ foreach ($val in $InputObject) {
+ $new_value.Add((ConvertTo-Hashtable -InputObject $val)) > $null
+ }
+ return , $new_value.ToArray()
+ }
+ else {
+ return , $InputObject
+ }
+ }
+ return , (ConvertTo-Hashtable -InputObject $raw_obj)
+ }
+ }
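+
+ # usage sketch (illustrative): '{"a": 1}' deserializes to a Hashtable,
+ # e.g. (ConvertFrom-AnsibleJson -InputObject '{"a": 1}').a -eq 1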
+
+ Function Format-AnsibleException {
+ <#
+ .SYNOPSIS
+ Formats a PowerShell ErrorRecord to a string that's fit for human
+ consumption.
+
+ .NOTES
+ Using Out-String can give us the first part of the exception but it
+ also wraps the messages at 80 chars which is not ideal. We also
+ append the ScriptStackTrace and the .NET StackTrace if present.
+ #>
+ param([System.Management.Automation.ErrorRecord]$ErrorRecord)
+
+ $exception = @"
+$($ErrorRecord.ToString())
+$($ErrorRecord.InvocationInfo.PositionMessage)
+ + CategoryInfo : $($ErrorRecord.CategoryInfo.ToString())
+ + FullyQualifiedErrorId : $($ErrorRecord.FullyQualifiedErrorId.ToString())
+"@
+ # module_common strips comments and blank lines, so we need to
+ # manually add a preceding newline using `r`n
+ $exception += "`r`n`r`nScriptStackTrace:`r`n$($ErrorRecord.ScriptStackTrace)`r`n"
+
+ # exceptions from C# will also have a StackTrace which we
+ # append if found
+ if ($null -ne $ErrorRecord.Exception.StackTrace) {
+ $exception += "`r`n$($ErrorRecord.Exception.ToString())"
+ }
+
+ return $exception
+ }
+ }
+ .$common_functions
+
+ # common wrapper functions used in the exec wrappers, this is defined in a
+ # script scoped variable so async_watchdog can pass them into the async job
+ $script:wrapper_functions = {
+ Function Write-AnsibleError {
+ <#
+ .SYNOPSIS
+ Writes an error message to a JSON string in the format that Ansible
+ understands. Also optionally adds an exception record if the
+ ErrorRecord is passed through.
+ #>
+ param(
+ [Parameter(Mandatory = $true)][String]$Message,
+ [System.Management.Automation.ErrorRecord]$ErrorRecord = $null
+ )
+ $result = @{
+ msg = $Message
+ failed = $true
+ }
+ if ($null -ne $ErrorRecord) {
+ $result.msg += ": $($ErrorRecord.Exception.Message)"
+ $result.exception = (Format-AnsibleException -ErrorRecord $ErrorRecord)
+ }
+ Write-Output -InputObject (ConvertTo-Json -InputObject $result -Depth 99 -Compress)
+ }
+
+ Function Write-AnsibleLog {
+ <#
+ .SYNOPSIS
+ Used as a debugging tool to log events to a file as they run in the
+ exec wrappers. By default this is a no-op; set the ANSIBLE_EXEC_DEBUG
+ env var on the Windows host to a log file path to enable it.
+ #>
+ param(
+ [Parameter(Mandatory = $true, Position = 0)][String]$Message,
+ [Parameter(Position = 1)][String]$Wrapper
+ )
+
+ $log_path = $env:ANSIBLE_EXEC_DEBUG
+ if ($log_path) {
+ $log_path = [System.Environment]::ExpandEnvironmentVariables($log_path)
+ $parent_path = [System.IO.Path]::GetDirectoryName($log_path)
+ if (Test-Path -LiteralPath $parent_path -PathType Container) {
+ $msg = "{0:u} - {1} - {2} - " -f (Get-Date), $pid, ([System.Security.Principal.WindowsIdentity]::GetCurrent().Name)
+ if ($null -ne $Wrapper) {
+ $msg += "$Wrapper - "
+ }
+ $msg += $Message + "`r`n"
+ $msg_bytes = [System.Text.Encoding]::UTF8.GetBytes($msg)
+
+ $fs = [System.IO.File]::Open($log_path, [System.IO.FileMode]::Append,
+ [System.IO.FileAccess]::Write, [System.IO.FileShare]::ReadWrite)
+ try {
+ $fs.Write($msg_bytes, 0, $msg_bytes.Length)
+ }
+ finally {
+ $fs.Close()
+ }
+ }
+ }
+ }
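+
+ # e.g. to enable the log on the target host (illustrative):
+ # [System.Environment]::SetEnvironmentVariable("ANSIBLE_EXEC_DEBUG", "C:\temp\ansible-exec.log", "Machine")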
+ }
+ .$wrapper_functions
+
+ # only init and stream in $json_raw if it wasn't set by the enclosing scope
+ if (-not $(Get-Variable "json_raw" -ErrorAction SilentlyContinue)) {
+ $json_raw = ''
+ }
+} process {
+ $json_raw += [String]$input
+} end {
+ Write-AnsibleLog "INFO - starting exec_wrapper" "exec_wrapper"
+ if (-not $json_raw) {
+ Write-AnsibleError -Message "internal error: no input given to PowerShell exec wrapper"
+ exit 1
+ }
+
+ Write-AnsibleLog "INFO - converting json raw to a payload" "exec_wrapper"
+ $payload = ConvertFrom-AnsibleJson -InputObject $json_raw
+
+ # TODO: handle binary modules
+ # TODO: handle persistence
+
+ if ($payload.min_os_version) {
+ $min_os_version = [Version]$payload.min_os_version
+ # Environment.OSVersion.Version is deprecated and may not return the
+ # right version
+ $actual_os_version = [Version](Get-Item -Path $env:SystemRoot\System32\kernel32.dll).VersionInfo.ProductVersion
+
+ Write-AnsibleLog "INFO - checking if actual os version '$actual_os_version' is less than the min os version '$min_os_version'" "exec_wrapper"
+ if ($actual_os_version -lt $min_os_version) {
+ $msg = "internal error: This module cannot run on this OS as it requires a minimum version of $min_os_version, actual was $actual_os_version"
+ Write-AnsibleError -Message $msg
+ exit 1
+ }
+ }
+ if ($payload.min_ps_version) {
+ $min_ps_version = [Version]$payload.min_ps_version
+ $actual_ps_version = $PSVersionTable.PSVersion
+
+ Write-AnsibleLog "INFO - checking if actual PS version '$actual_ps_version' is less than the min PS version '$min_ps_version'" "exec_wrapper"
+ if ($actual_ps_version -lt $min_ps_version) {
+ $msg = "internal error: This module cannot run as it requires a minimum PowerShell version of $min_ps_version, actual was $actual_ps_version"
+ Write-AnsibleError -Message $msg
+ exit 1
+ }
+ }
+
+ # take the 0th action as the entrypoint; the wrappers pop the list themselves
+ $action = $payload.actions[0]
+ Write-AnsibleLog "INFO - running action $action" "exec_wrapper"
+
+ $entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($payload.($action)))
+ $entrypoint = [ScriptBlock]::Create($entrypoint)
+ # some wrappers want the output in base64 form so we preserve the
+ # formatting and don't fall prey to locale issues; store the value here
+ # in case the wrapper changes it when creating a payload for its own
+ # exec_wrapper
+ $encoded_output = $payload.encoded_output
+
+ try {
+ $output = &$entrypoint -Payload $payload
+ if ($encoded_output -and $null -ne $output) {
+ $b64_output = [System.Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($output))
+ Write-Output -InputObject $b64_output
+ }
+ else {
+ $output
+ }
+ }
+ catch {
+ Write-AnsibleError -Message "internal error: failed to run exec_wrapper action $action" -ErrorRecord $_
+ exit 1
+ }
+ Write-AnsibleLog "INFO - ending exec_wrapper" "exec_wrapper"
+}
diff --git a/lib/ansible/executor/powershell/module_manifest.py b/lib/ansible/executor/powershell/module_manifest.py
new file mode 100644
index 0000000..970e848
--- /dev/null
+++ b/lib/ansible/executor/powershell/module_manifest.py
@@ -0,0 +1,402 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import errno
+import json
+import os
+import pkgutil
+import random
+import re
+
+from ansible.module_utils.compat.version import LooseVersion
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.compat.importlib import import_module
+from ansible.plugins.loader import ps_module_utils_loader
+from ansible.utils.collection_loader import resource_from_fqcr
+
+
+class PSModuleDepFinder(object):
+
+ def __init__(self):
+ # This is also used by validate-modules to get a module's required utils in base and a collection.
+ self.ps_modules = dict()
+ self.exec_scripts = dict()
+
+ # by defining an explicit dict of cs utils and where they are used, we
+ # can potentially save time by not adding the type multiple times if it
+ # isn't needed
+ self.cs_utils_wrapper = dict()
+ self.cs_utils_module = dict()
+
+ self.ps_version = None
+ self.os_version = None
+ self.become = False
+
+ self._re_cs_module = [
+ # Reference C# module_util in another C# util, this must always be the fully qualified name.
+ # 'using ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
+ re.compile(to_bytes(r'(?i)^using\s((Ansible\..+)|'
+ r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+));\s*$')),
+ ]
+
+ self._re_cs_in_ps_module = [
+ # Reference C# module_util in a PowerShell module
+ # '#AnsibleRequires -CSharpUtil Ansible.{name}'
+ # '#AnsibleRequires -CSharpUtil ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
+ # '#AnsibleRequires -CSharpUtil ..module_utils.{name}'
+ # Can have '-Optional' at the end to denote the util is optional
+ re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((Ansible\.[\w\.]+)|'
+ r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+)|'
+ r'(\.[\w\.]+))(?P<optional>\s+-Optional){0,1}')),
+ ]
+
+ self._re_ps_module = [
+ # Original way of referencing a builtin module_util
+ # '#Requires -Module Ansible.ModuleUtils.{name}
+ re.compile(to_bytes(r'(?i)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)')),
+ # New way of referencing a builtin and collection module_util
+ # '#AnsibleRequires -PowerShell Ansible.ModuleUtils.{name}'
+ # '#AnsibleRequires -PowerShell ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
+ # '#AnsibleRequires -PowerShell ..module_utils.{name}'
+ # Can have '-Optional' at the end to denote the util is optional
+ re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-powershell\s+((Ansible\.ModuleUtils\.[\w\.]+)|'
+ r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+)|'
+ r'(\.[\w\.]+))(?P<optional>\s+-Optional){0,1}')),
+ ]
+
+ self._re_wrapper = re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-wrapper\s+(\w*)'))
+ self._re_ps_version = re.compile(to_bytes(r'(?i)^#requires\s+\-version\s+([0-9]+(\.[0-9]+){0,3})$'))
+ self._re_os_version = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-osversion\s+([0-9]+(\.[0-9]+){0,3})$'))
+ self._re_become = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-become$'))
+
+ def scan_module(self, module_data, fqn=None, wrapper=False, powershell=True):
+ lines = module_data.split(b'\n')
+ module_utils = set()
+ if wrapper:
+ cs_utils = self.cs_utils_wrapper
+ else:
+ cs_utils = self.cs_utils_module
+
+ if powershell:
+ checks = [
+ # PS module contains '#Requires -Module Ansible.ModuleUtils.*'
+ # PS module contains '#AnsibleRequires -Powershell Ansible.*' (or collections module_utils ref)
+ (self._re_ps_module, self.ps_modules, ".psm1"),
+ # PS module contains '#AnsibleRequires -CSharpUtil Ansible.*' (or collections module_utils ref)
+ (self._re_cs_in_ps_module, cs_utils, ".cs"),
+ ]
+ else:
+ checks = [
+ # CS module contains 'using Ansible.*;' or 'using ansible_collections.ns.coll.plugins.module_utils.*;'
+ (self._re_cs_module, cs_utils, ".cs"),
+ ]
+
+ for line in lines:
+ for check in checks:
+ for pattern in check[0]:
+ match = pattern.match(line)
+ if match:
+ # tolerate windows line endings by stripping any remaining
+ # newline chars
+ module_util_name = to_text(match.group(1).rstrip())
+ match_dict = match.groupdict()
+ optional = match_dict.get('optional', None) is not None
+
+ if module_util_name not in check[1].keys():
+ module_utils.add((module_util_name, check[2], fqn, optional))
+
+ break
+
+ if powershell:
+ ps_version_match = self._re_ps_version.match(line)
+ if ps_version_match:
+ self._parse_version_match(ps_version_match, "ps_version")
+
+ os_version_match = self._re_os_version.match(line)
+ if os_version_match:
+ self._parse_version_match(os_version_match, "os_version")
+
+ # once become is set, no need to keep on checking recursively
+ if not self.become:
+ become_match = self._re_become.match(line)
+ if become_match:
+ self.become = True
+
+ if wrapper:
+ wrapper_match = self._re_wrapper.match(line)
+ if wrapper_match:
+ self.scan_exec_script(wrapper_match.group(1).rstrip())
+
+ # recursively drill into each Requires to see if there are any more
+ # requirements
+ for m in set(module_utils):
+ self._add_module(*m, wrapper=wrapper)
+
+ def scan_exec_script(self, name):
+ # scans lib/ansible/executor/powershell for scripts used in the module
+ # exec side. It also scans these scripts for any dependencies
+ name = to_text(name)
+ if name in self.exec_scripts.keys():
+ return
+
+ data = pkgutil.get_data("ansible.executor.powershell", to_native(name + ".ps1"))
+ if data is None:
+ raise AnsibleError("Could not find executor powershell script "
+ "for '%s'" % name)
+
+ b_data = to_bytes(data)
+
+ # remove comments to reduce the payload size in the exec wrappers
+ if C.DEFAULT_DEBUG:
+ exec_script = b_data
+ else:
+ exec_script = _strip_comments(b_data)
+ self.exec_scripts[name] = to_bytes(exec_script)
+ self.scan_module(b_data, wrapper=True, powershell=True)
+
+ def _add_module(self, name, ext, fqn, optional, wrapper=False):
+ m = to_text(name)
+
+ util_fqn = None
+
+ if m.startswith("Ansible."):
+ # Builtin util, use plugin loader to get the data
+ mu_path = ps_module_utils_loader.find_plugin(m, ext)
+
+ if not mu_path:
+ if optional:
+ return
+
+ raise AnsibleError('Could not find imported module support code '
+ 'for \'%s\'' % m)
+
+ module_util_data = to_bytes(_slurp(mu_path))
+ else:
+ # Collection util, load the package data based on the util import.
+
+ submodules = m.split(".")
+ if m.startswith('.'):
+ fqn_submodules = fqn.split('.')
+ for submodule in submodules:
+ if submodule:
+ break
+ del fqn_submodules[-1]
+
+ submodules = fqn_submodules + [s for s in submodules if s]
+
+ n_package_name = to_native('.'.join(submodules[:-1]), errors='surrogate_or_strict')
+ n_resource_name = to_native(submodules[-1] + ext, errors='surrogate_or_strict')
+
+ try:
+ module_util = import_module(n_package_name)
+ pkg_data = pkgutil.get_data(n_package_name, n_resource_name)
+ if pkg_data is None:
+ raise ImportError("No package data found")
+
+ module_util_data = to_bytes(pkg_data, errors='surrogate_or_strict')
+ util_fqn = to_text("%s.%s " % (n_package_name, submodules[-1]), errors='surrogate_or_strict')
+
+ # Get the path of the util which is required for coverage collection.
+ resource_paths = list(module_util.__path__)
+ if len(resource_paths) != 1:
+ # This should never happen with a collection but we are just being defensive about it.
+ raise AnsibleError("Internal error: Referenced module_util package '%s' contains 0 or multiple "
+ "import locations when we only expect 1." % n_package_name)
+ mu_path = os.path.join(resource_paths[0], n_resource_name)
+ except (ImportError, OSError) as err:
+ if getattr(err, "errno", errno.ENOENT) == errno.ENOENT:
+ if optional:
+ return
+
+ raise AnsibleError('Could not find collection imported module support code for \'%s\''
+ % to_native(m))
+
+ else:
+ raise
+
+ util_info = {
+ 'data': module_util_data,
+ 'path': to_text(mu_path),
+ }
+ if ext == ".psm1":
+ self.ps_modules[m] = util_info
+ else:
+ if wrapper:
+ self.cs_utils_wrapper[m] = util_info
+ else:
+ self.cs_utils_module[m] = util_info
+ self.scan_module(module_util_data, fqn=util_fqn, wrapper=wrapper, powershell=(ext == ".psm1"))
+
+ def _parse_version_match(self, match, attribute):
+ new_version = to_text(match.group(1)).rstrip()
+
+ # PowerShell cannot cast a string of "1" to Version, it must have at
+ # least the major.minor for it to be valid so we append 0
+ if match.group(2) is None:
+ new_version = "%s.0" % new_version
+
+ existing_version = getattr(self, attribute, None)
+ if existing_version is None:
+ setattr(self, attribute, new_version)
+ else:
+ # determine which is the latest version and set that
+ if LooseVersion(new_version) > LooseVersion(existing_version):
+ setattr(self, attribute, new_version)
+
+
+def _slurp(path):
+ if not os.path.exists(path):
+ raise AnsibleError("imported module support code does not exist at %s"
+ % os.path.abspath(path))
+ # use a context manager so the handle is closed even if the read fails
+ with open(path, 'rb') as fd:
+ data = fd.read()
+ return data
+
+
+def _strip_comments(source):
+ # Strip comments and blank lines from the wrapper
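+ # e.g. b"<#\ndoc\n#>\n$a = 1\n\n# note" comes back as b"$a = 1"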
+ buf = []
+ start_block = False
+ for line in source.splitlines():
+ l = line.strip()
+
+ if start_block and l.endswith(b'#>'):
+ start_block = False
+ continue
+ elif start_block:
+ continue
+ elif l.startswith(b'<#'):
+ start_block = True
+ continue
+ elif not l or l.startswith(b'#'):
+ continue
+
+ buf.append(line)
+ return b'\n'.join(buf)
+
+
+def _create_powershell_wrapper(b_module_data, module_path, module_args,
+ environment, async_timeout, become,
+ become_method, become_user, become_password,
+ become_flags, substyle, task_vars, module_fqn):
+ # creates the manifest/wrapper used in PowerShell/C# modules to enable
+ # things like become and async - this is also called in action/script.py
+
+ # FUTURE: add process_wrapper.ps1 to run module_wrapper in a new process
+ # if running under a persistent connection and substyle is C# so we
+ # don't have type conflicts
+ finder = PSModuleDepFinder()
+ if substyle != 'script':
+ # don't scan the module for util dependencies and other Ansible related
+ # flags if the substyle is 'script' which is set by action/script
+ finder.scan_module(b_module_data, fqn=module_fqn, powershell=(substyle == "powershell"))
+
+ module_wrapper = "module_%s_wrapper" % substyle
+ exec_manifest = dict(
+ module_entry=to_text(base64.b64encode(b_module_data)),
+ powershell_modules=dict(),
+ csharp_utils=dict(),
+ csharp_utils_module=list(), # csharp_utils only required by a module
+ module_args=module_args,
+ actions=[module_wrapper],
+ environment=environment,
+ encoded_output=False,
+ )
+ finder.scan_exec_script(module_wrapper)
+
+ if async_timeout > 0:
+ finder.scan_exec_script('exec_wrapper')
+ finder.scan_exec_script('async_watchdog')
+ finder.scan_exec_script('async_wrapper')
+
+ exec_manifest["actions"].insert(0, 'async_watchdog')
+ exec_manifest["actions"].insert(0, 'async_wrapper')
+ exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
+ exec_manifest["async_timeout_sec"] = async_timeout
+ exec_manifest["async_startup_timeout"] = C.config.get_config_value("WIN_ASYNC_STARTUP_TIMEOUT", variables=task_vars)
+
+ if become and resource_from_fqcr(become_method) == 'runas': # runas and namespace.collection.runas
+ finder.scan_exec_script('exec_wrapper')
+ finder.scan_exec_script('become_wrapper')
+
+ exec_manifest["actions"].insert(0, 'become_wrapper')
+ exec_manifest["become_user"] = become_user
+ exec_manifest["become_password"] = become_password
+ exec_manifest['become_flags'] = become_flags
+
+ exec_manifest['min_ps_version'] = finder.ps_version
+ exec_manifest['min_os_version'] = finder.os_version
+ if finder.become and 'become_wrapper' not in exec_manifest['actions']:
+ finder.scan_exec_script('exec_wrapper')
+ finder.scan_exec_script('become_wrapper')
+
+ exec_manifest['actions'].insert(0, 'become_wrapper')
+ exec_manifest['become_user'] = 'SYSTEM'
+ exec_manifest['become_password'] = None
+ exec_manifest['become_flags'] = None
+
+ coverage_manifest = dict(
+ module_path=module_path,
+ module_util_paths=dict(),
+ output=None,
+ )
+ coverage_output = C.config.get_config_value('COVERAGE_REMOTE_OUTPUT', variables=task_vars)
+ if coverage_output and substyle == 'powershell':
+ finder.scan_exec_script('coverage_wrapper')
+ coverage_manifest['output'] = coverage_output
+
+ coverage_enabled = C.config.get_config_value('COVERAGE_REMOTE_PATHS', variables=task_vars)
+ coverage_manifest['path_filter'] = coverage_enabled
+
+ # make sure Ansible.ModuleUtils.AddType is added if any C# utils are used
+ if len(finder.cs_utils_wrapper) > 0 or len(finder.cs_utils_module) > 0:
+ finder._add_module(b"Ansible.ModuleUtils.AddType", ".psm1", None, False,
+ wrapper=False)
+
+ # exec_wrapper is only required in the payload when using become or
+ # async. To save space we note whether it was already added and, if it
+ # wasn't, remove it from the manifest after grabbing its contents
+ exec_required = "exec_wrapper" in finder.exec_scripts.keys()
+ finder.scan_exec_script("exec_wrapper")
+ # must end with a trailing empty line so it runs the begin/process/end block
+ finder.exec_scripts["exec_wrapper"] += b"\n\n"
+
+ exec_wrapper = finder.exec_scripts["exec_wrapper"]
+ if not exec_required:
+ finder.exec_scripts.pop("exec_wrapper")
+
+ for name, data in finder.exec_scripts.items():
+ b64_data = to_text(base64.b64encode(data))
+ exec_manifest[name] = b64_data
+
+ for name, data in finder.ps_modules.items():
+ b64_data = to_text(base64.b64encode(data['data']))
+ exec_manifest['powershell_modules'][name] = b64_data
+ coverage_manifest['module_util_paths'][name] = data['path']
+
+ cs_utils = {}
+ for cs_util in [finder.cs_utils_wrapper, finder.cs_utils_module]:
+ for name, data in cs_util.items():
+ cs_utils[name] = data['data']
+
+ for name, data in cs_utils.items():
+ b64_data = to_text(base64.b64encode(data))
+ exec_manifest['csharp_utils'][name] = b64_data
+ exec_manifest['csharp_utils_module'] = list(finder.cs_utils_module.keys())
+
+ # To save on the data we are sending across we only add the coverage info if coverage is being run
+ if 'coverage_wrapper' in exec_manifest:
+ exec_manifest['coverage'] = coverage_manifest
+
+ b_json = to_bytes(json.dumps(exec_manifest))
+ # delimit the payload JSON from the wrapper to keep sensitive contents out of scriptblocks (which can be logged)
+ b_data = exec_wrapper + b'\0\0\0\0' + b_json
+ return b_data
diff --git a/lib/ansible/executor/powershell/module_powershell_wrapper.ps1 b/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
new file mode 100644
index 0000000..c35c84c
--- /dev/null
+++ b/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
@@ -0,0 +1,75 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
+)
+
+#AnsibleRequires -Wrapper module_wrapper
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting module_powershell_wrapper" "module_powershell_wrapper"
+
+$module_name = $Payload.module_args["_ansible_module_name"]
+Write-AnsibleLog "INFO - building module payload for '$module_name'" "module_powershell_wrapper"
+
+# compile any C# module utils passed in from the controller, Add-CSharpType is
+# automatically added to the payload manifest if any csharp util is set
+$csharp_utils = [System.Collections.ArrayList]@()
+foreach ($csharp_util in $Payload.csharp_utils_module) {
+ Write-AnsibleLog "INFO - adding $csharp_util to list of C# references to compile" "module_powershell_wrapper"
+ $util_code = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils[$csharp_util]))
+ $csharp_utils.Add($util_code) > $null
+}
+if ($csharp_utils.Count -gt 0) {
+ $add_type_b64 = $Payload.powershell_modules["Ansible.ModuleUtils.AddType"]
+ $add_type = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($add_type_b64))
+ New-Module -Name Ansible.ModuleUtils.AddType -ScriptBlock ([ScriptBlock]::Create($add_type)) | Import-Module > $null
+
+ # add any C# references so the module does not have to do so
+ $new_tmp = [System.Environment]::ExpandEnvironmentVariables($Payload.module_args["_ansible_remote_tmp"])
+ Add-CSharpType -References $csharp_utils -TempPath $new_tmp -IncludeDebugInfo
+}
+
+if ($Payload.ContainsKey("coverage") -and $null -ne $host.Runspace -and $null -ne $host.Runspace.Debugger) {
+ $entrypoint = $payload.coverage_wrapper
+
+ $params = @{
+ Payload = $Payload
+ }
+}
+else {
+ # get the common module_wrapper code and invoke that to run the module
+ $module = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.module_entry))
+ $variables = [System.Collections.ArrayList]@(@{ Name = "complex_args"; Value = $Payload.module_args; Scope = "Global" })
+ $entrypoint = $Payload.module_wrapper
+
+ $params = @{
+ Scripts = @($script:common_functions, $module)
+ Variables = $variables
+ Environment = $Payload.environment
+ Modules = $Payload.powershell_modules
+ ModuleName = $module_name
+ }
+}
+
+$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($entrypoint))
+$entrypoint = [ScriptBlock]::Create($entrypoint)
+
+try {
+ &$entrypoint @params
+}
+catch {
+ # failed to invoke the PowerShell module, capture the exception and
+ # output a pretty error for Ansible to parse
+ $result = @{
+ msg = "Failed to invoke PowerShell module: $($_.Exception.Message)"
+ failed = $true
+ exception = (Format-AnsibleException -ErrorRecord $_)
+ }
+ Write-Output -InputObject (ConvertTo-Json -InputObject $result -Depth 99 -Compress)
+ $host.SetShouldExit(1)
+}
+
+Write-AnsibleLog "INFO - ending module_powershell_wrapper" "module_powershell_wrapper"
diff --git a/lib/ansible/executor/powershell/module_script_wrapper.ps1 b/lib/ansible/executor/powershell/module_script_wrapper.ps1
new file mode 100644
index 0000000..dd8420f
--- /dev/null
+++ b/lib/ansible/executor/powershell/module_script_wrapper.ps1
@@ -0,0 +1,22 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory = $true)][System.Collections.IDictionary]$Payload
+)
+
+#AnsibleRequires -Wrapper module_wrapper
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting module_script_wrapper" "module_script_wrapper"
+
+$script = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.module_entry))
+
+# get the common module_wrapper code and invoke that to run the module
+$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($payload.module_wrapper))
+$entrypoint = [ScriptBlock]::Create($entrypoint)
+
+&$entrypoint -Scripts $script -Environment $Payload.environment -ModuleName "script"
+
+Write-AnsibleLog "INFO - ending module_script_wrapper" "module_script_wrapper"
diff --git a/lib/ansible/executor/powershell/module_wrapper.ps1 b/lib/ansible/executor/powershell/module_wrapper.ps1
new file mode 100644
index 0000000..20a9677
--- /dev/null
+++ b/lib/ansible/executor/powershell/module_wrapper.ps1
@@ -0,0 +1,226 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+<#
+.SYNOPSIS
+Invokes an Ansible module in a new Runspace. This cmdlet will output the
+module's output and write any errors to the error stream of the current
+host.
+
+.PARAMETER Scripts
+[Object[]] String or ScriptBlocks to execute.
+
+.PARAMETER Variables
+[System.Collections.ArrayList] The variables to set in the new Pipeline.
+Each value is a hashtable that contains the parameters to use with
+Set-Variable;
+ Name: the name of the variable to set
+ Value: the value of the variable to set
+ Scope: the scope of the variable
+
+.PARAMETER Environment
+[System.Collections.IDictionary] A Dictionary of environment key/values to
+set in the new Pipeline.
+
+.PARAMETER Modules
+[System.Collections.IDictionary] A Dictionary of PowerShell modules to
+import into the new Pipeline. The key is the name of the module and the
+value is a base64 string of the module util code.
+
+.PARAMETER ModuleName
+[String] The name of the module that is being executed.
+
+.PARAMETER Breakpoints
+A list of line breakpoints to add to the runspace debugger. This is used to
+track module and module_utils coverage.
+#>
+param(
+ [Object[]]$Scripts,
+ [System.Collections.ArrayList][AllowEmptyCollection()]$Variables,
+ [System.Collections.IDictionary]$Environment,
+ [System.Collections.IDictionary]$Modules,
+ [String]$ModuleName,
+ [System.Management.Automation.LineBreakpoint[]]$Breakpoints = @()
+)
+
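+# callers invoke this script via splatting, e.g. (illustrative; $wrapper_sb
+# stands for this script compiled as a ScriptBlock):
+#   $params = @{ Scripts = @($script:common_functions, $module); ModuleName = $module_name }
+#   &$wrapper_sb @params
+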
+Write-AnsibleLog "INFO - creating new PowerShell pipeline for $ModuleName" "module_wrapper"
+$ps = [PowerShell]::Create()
+
+# do not set ErrorActionPreference for script
+if ($ModuleName -ne "script") {
+ $ps.Runspace.SessionStateProxy.SetVariable("ErrorActionPreference", "Stop")
+}
+
+# force input encoding to preamble-free UTF8 so PS sub-processes (eg,
+# Start-Job) don't blow up. This is only required for WinRM; a PSRP
+# runspace doesn't have a host console and setting it there would bomb out
+if ($host.Name -eq "ConsoleHost") {
+ Write-AnsibleLog "INFO - setting console input encoding to UTF8 for $ModuleName" "module_wrapper"
+ $ps.AddScript('[Console]::InputEncoding = New-Object Text.UTF8Encoding $false').AddStatement() > $null
+}
+
+# set the variables
+foreach ($variable in $Variables) {
+ Write-AnsibleLog "INFO - setting variable '$($variable.Name)' for $ModuleName" "module_wrapper"
+ $ps.AddCommand("Set-Variable").AddParameters($variable).AddStatement() > $null
+}
+
+# set the environment vars
+if ($Environment) {
+ # Escaping quotes can be problematic, instead just pass the string to the runspace and set it directly.
+ Write-AnsibleLog "INFO - setting environment vars for $ModuleName" "module_wrapper"
+ $ps.Runspace.SessionStateProxy.SetVariable("_AnsibleEnvironment", $Environment)
+ $ps.AddScript(@'
+foreach ($env_kv in $_AnsibleEnvironment.GetEnumerator()) {
+ [System.Environment]::SetEnvironmentVariable($env_kv.Key, $env_kv.Value)
+}
+'@).AddStatement() > $null
+}
+
+# import the PS modules
+if ($Modules) {
+ foreach ($module in $Modules.GetEnumerator()) {
+ Write-AnsibleLog "INFO - create module util '$($module.Key)' for $ModuleName" "module_wrapper"
+ $module_name = $module.Key
+ $module_code = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($module.Value))
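+ # chain New-Module | Import-Module | Out-Null into one pipeline so the util is
+ # imported into the runspace without leaking anything to the output stream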
+ $ps.AddCommand("New-Module").AddParameters(@{Name = $module_name; ScriptBlock = [ScriptBlock]::Create($module_code) }) > $null
+ $ps.AddCommand("Import-Module").AddParameter("WarningAction", "SilentlyContinue") > $null
+ $ps.AddCommand("Out-Null").AddStatement() > $null
+ }
+}
+
+# redefine Write-Host to dump to output instead of failing
+# lots of scripts still use it
+$ps.AddScript('Function Write-Host($msg) { Write-Output -InputObject $msg }').AddStatement() > $null
+
+# add the scripts and run
+foreach ($script in $Scripts) {
+ $ps.AddScript($script).AddStatement() > $null
+}
+
+if ($Breakpoints.Count -gt 0) {
+ Write-AnsibleLog "INFO - adding breakpoint to runspace that will run the modules" "module_wrapper"
+ if ($PSVersionTable.PSVersion.Major -eq 3) {
+ # The SetBreakpoints method was only added in PowerShell v4+. We need to rely on a private method to
+ # achieve the same functionality in this older PowerShell version. This should be removed once we drop
+ # support for PowerShell v3.
+ $set_method = $ps.Runspace.Debugger.GetType().GetMethod(
+ 'AddLineBreakpoint', [System.Reflection.BindingFlags]'Instance, NonPublic'
+ )
+ foreach ($b in $Breakpoints) {
+ $set_method.Invoke($ps.Runspace.Debugger, [Object[]]@(, $b)) > $null
+ }
+ }
+ else {
+ $ps.Runspace.Debugger.SetBreakpoints($Breakpoints)
+ }
+}
+
+Write-AnsibleLog "INFO - start module exec with Invoke() - $ModuleName" "module_wrapper"
+
+# temporarily override the stdout stream and create our own in a StringBuilder
+# we use this to ensure there's always an Out pipe and that we capture the
+# output for things like async or psrp
+$orig_out = [System.Console]::Out
+$sb = New-Object -TypeName System.Text.StringBuilder
+$new_out = New-Object -TypeName System.IO.StringWriter -ArgumentList $sb
+try {
+ [System.Console]::SetOut($new_out)
+ $module_output = $ps.Invoke()
+}
+catch {
+ # uncaught exception while executing module, present a prettier error for
+ # Ansible to parse
+ $error_params = @{
+ Message = "Unhandled exception while executing module"
+ ErrorRecord = $_
+ }
+
+ # Be more defensive when trying to find the InnerException in case it isn't
+ # set. This shouldn't ever be the case but if it is then it makes it more
+ # difficult to track down the problem.
+ if ($_.Exception.PSObject.Properties.Name -contains "InnerException") {
+ $inner_exception = $_.Exception.InnerException
+ if ($inner_exception.PSObject.Properties.Name -contains "ErrorRecord") {
+ $error_params.ErrorRecord = $inner_exception.ErrorRecord
+ }
+ }
+
+ Write-AnsibleError @error_params
+ $host.SetShouldExit(1)
+ return
+}
+finally {
+ [System.Console]::SetOut($orig_out)
+ $new_out.Dispose()
+}
+
+# other types of errors may not throw an exception in Invoke but rather just
+# set the pipeline state to failed
+if ($ps.InvocationStateInfo.State -eq "Failed" -and $ModuleName -ne "script") {
+ $reason = $ps.InvocationStateInfo.Reason
+ $error_params = @{
+ Message = "Unhandled exception while executing module"
+ }
+
+ # The error record should always be set on the reason but this does not
+ # always happen on Server 2008 R2 for some reason (probably memory hotfix).
+ # Be defensive when trying to get the error record and fall back to other
+ # options.
+ if ($null -eq $reason) {
+ $error_params.Message += ": Unknown error"
+ }
+ elseif ($reason.PSObject.Properties.Name -contains "ErrorRecord") {
+ $error_params.ErrorRecord = $reason.ErrorRecord
+ }
+ else {
+ $error_params.Message += ": $($reason.ToString())"
+ }
+
+ Write-AnsibleError @error_params
+ $host.SetShouldExit(1)
+ return
+}
+
+Write-AnsibleLog "INFO - module exec ended $ModuleName" "module_wrapper"
+$stdout = $sb.ToString()
+if ($stdout) {
+ Write-Output -InputObject $stdout
+}
+if ($module_output.Count -gt 0) {
+ # do not output if empty collection
+ Write-AnsibleLog "INFO - using the output stream for module output - $ModuleName" "module_wrapper"
+ Write-Output -InputObject ($module_output -join "`r`n")
+}
+
+# we attempt to get the return code from the LASTEXITCODE variable
+# this is set explicitly by newer style modules when calling
+# ExitJson and FailJson. If set, we set the current host's exit code
+# to that same value
+$rc = $ps.Runspace.SessionStateProxy.GetVariable("LASTEXITCODE")
+if ($null -ne $rc) {
+ Write-AnsibleLog "INFO - got an rc of $rc from $ModuleName exec" "module_wrapper"
+ $host.SetShouldExit($rc)
+}
+
+# PS3 doesn't properly set HadErrors in many cases, so inspect the error stream as a fallback.
+# With the trap handler that's now in place, this should only write to the output if
+# $ErrorActionPreference != "Stop". That's ok because this is sent to the stderr output
+# for a user to manually debug if something went horribly wrong
+if ($ps.HadErrors -or ($PSVersionTable.PSVersion.Major -lt 4 -and $ps.Streams.Error.Count -gt 0)) {
+ Write-AnsibleLog "WARN - module had errors, outputting error info $ModuleName" "module_wrapper"
+ # if the rc wasn't explicitly set, we return an exit code of 1
+ if ($null -eq $rc) {
+ $host.SetShouldExit(1)
+ }
+
+ # output each error to the error stream of the current pipeline
+ foreach ($err in $ps.Streams.Error) {
+ $error_msg = Format-AnsibleException -ErrorRecord $err
+
+ # need to use the current host's UI class as we may not have
+ # a console to write the stderr to, e.g. psrp
+ Write-AnsibleLog "WARN - error msg for $($ModuleName):`r`n$error_msg" "module_wrapper"
+ $host.UI.WriteErrorLine($error_msg)
+ }
+}
diff --git a/lib/ansible/executor/process/__init__.py b/lib/ansible/executor/process/__init__.py
new file mode 100644
index 0000000..ae8ccff
--- /dev/null
+++ b/lib/ansible/executor/process/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py
new file mode 100644
index 0000000..5113b83
--- /dev/null
+++ b/lib/ansible/executor/process/worker.py
@@ -0,0 +1,226 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import traceback
+
+from jinja2.exceptions import TemplateNotFound
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.executor.task_executor import TaskExecutor
+from ansible.module_utils._text import to_text
+from ansible.utils.display import Display
+from ansible.utils.multiprocessing import context as multiprocessing_context
+
+__all__ = ['WorkerProcess']
+
+display = Display()
+
+
+class WorkerProcess(multiprocessing_context.Process): # type: ignore[name-defined]
+ '''
+ The worker process class, which uses TaskExecutor to run the
+ assigned task and pushes the result into a results queue
+ for reading later.
+ '''
+
+ def __init__(self, final_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj):
+
+ super(WorkerProcess, self).__init__()
+ # final_q is the results queue owned by the task queue manager
+ self._final_q = final_q
+ self._task_vars = task_vars
+ self._host = host
+ self._task = task
+ self._play_context = play_context
+ self._loader = loader
+ self._variable_manager = variable_manager
+ self._shared_loader_obj = shared_loader_obj
+
+ # NOTE: this works due to fork, if switching to threads this should change to per thread storage of temp files
+ # clear var to ensure we only delete files for this child
+ self._loader._tempfiles = set()
+
+ def _save_stdin(self):
+ self._new_stdin = None
+ try:
+ if sys.stdin.isatty() and sys.stdin.fileno() is not None:
+ try:
+ self._new_stdin = os.fdopen(os.dup(sys.stdin.fileno()))
+ except OSError:
+ # couldn't dupe stdin, most likely because it's
+ # not a valid file descriptor
+ pass
+ except (AttributeError, ValueError):
+ # couldn't get stdin's fileno
+ pass
+
+ if self._new_stdin is None:
+ self._new_stdin = open(os.devnull)
+
+ def start(self):
+ '''
+ multiprocessing.Process replaces the worker's stdin with a new file
+ but we wish to preserve it if it is connected to a terminal.
+ Therefore dup a copy prior to calling the real start(),
+ ensuring the descriptor is preserved somewhere in the new child, and
+ make sure it is closed in the parent when start() completes.
+ '''
+
+ self._save_stdin()
+ # FUTURE: this lock can be removed once a more generalized pre-fork thread pause is in place
+ with display._lock:
+ try:
+ return super(WorkerProcess, self).start()
+ finally:
+ self._new_stdin.close()
+
+ def _hard_exit(self, e):
+ '''
+ There is no safe exception to return to higher level code that does not
+ risk an innocent try/except finding itself executing in the wrong
+ process. All code executing above WorkerProcess.run() on the stack
+ conceptually belongs to another program.
+ '''
+
+ try:
+ display.debug(u"WORKER HARD EXIT: %s" % to_text(e))
+ except BaseException:
+ # If the cause of the fault is IOError being generated by stdio,
+ # attempting to log a debug message may trigger another IOError.
+ # Try printing once then give up.
+ pass
+
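+ # os._exit skips atexit and multiprocessing cleanup handlers entirely;
+ # that state belongs to the parent, not this crashed child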
+ os._exit(1)
+
+ def run(self):
+ '''
+ Wrap _run() to ensure no possibility an errant exception can cause
+ control to return to the StrategyBase task loop, or any other code
+ higher in the stack.
+
+ As multiprocessing in Python 2.x provides no protection, it is possible
+ a try/except added in far-away code can cause a crashed child process
+ to suddenly assume the role and prior state of its parent.
+ '''
+ try:
+ return self._run()
+ except BaseException as e:
+ self._hard_exit(e)
+ finally:
+ # This is a hack, pure and simple, to work around a potential deadlock
+ # in ``multiprocessing.Process`` when flushing stdout/stderr during process
+ # shutdown.
+ #
+ # We should no longer have a problem with ``Display``, as it now proxies over
+ # the queue from a fork. However, to avoid any issues with plugins that may
+ # be doing their own printing, this has been kept.
+ #
+ # This happens at the very end to avoid that deadlock, by simply side
+ # stepping it. This should not be treated as a long term fix.
+ #
+ # TODO: Evaluate migrating away from the ``fork`` multiprocessing start method.
+ sys.stdout = sys.stderr = open(os.devnull, 'w')
+
+ def _run(self):
+ '''
+ Called when the process is started. Pushes the result onto the
+ results queue. We also remove the host from the blocked hosts list, to
+ signify that it is ready for its next task.
+ '''
+
+ # import cProfile, pstats, StringIO
+ # pr = cProfile.Profile()
+ # pr.enable()
+
+ # Set the queue on Display so calls to Display.display are proxied over the queue
+ display.set_queue(self._final_q)
+
+ try:
+ # execute the task and build a TaskResult from the result
+ display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
+ executor_result = TaskExecutor(
+ self._host,
+ self._task,
+ self._task_vars,
+ self._play_context,
+ self._new_stdin,
+ self._loader,
+ self._shared_loader_obj,
+ self._final_q
+ ).run()
+
+ display.debug("done running TaskExecutor() for %s/%s [%s]" % (self._host, self._task, self._task._uuid))
+ self._host.vars = dict()
+ self._host.groups = []
+
+ # put the result on the result queue
+ display.debug("sending task result for task %s" % self._task._uuid)
+ self._final_q.send_task_result(
+ self._host.name,
+ self._task._uuid,
+ executor_result,
+ task_fields=self._task.dump_attrs(),
+ )
+ display.debug("done sending task result for task %s" % self._task._uuid)
+
+ except AnsibleConnectionFailure:
+ self._host.vars = dict()
+ self._host.groups = []
+ self._final_q.send_task_result(
+ self._host.name,
+ self._task._uuid,
+ dict(unreachable=True),
+ task_fields=self._task.dump_attrs(),
+ )
+
+ except Exception as e:
+ if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
+ try:
+ self._host.vars = dict()
+ self._host.groups = []
+ self._final_q.send_task_result(
+ self._host.name,
+ self._task._uuid,
+ dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''),
+ task_fields=self._task.dump_attrs(),
+ )
+ except Exception:
+ display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
+ display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))
+ finally:
+ self._clean_up()
+
+ display.debug("WORKER PROCESS EXITING")
+
+ # pr.disable()
+ # s = StringIO.StringIO()
+ # sortby = 'time'
+ # ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
+ # ps.print_stats()
+ # with open('worker_%06d.stats' % os.getpid(), 'w') as f:
+ # f.write(s.getvalue())
+
+ def _clean_up(self):
+ # NOTE: see note in init about forks
+ # ensure we cleanup all temp files for this worker
+ self._loader.cleanup_all_tmp_files()
diff --git a/lib/ansible/executor/stats.py b/lib/ansible/executor/stats.py
new file mode 100644
index 0000000..13a053b
--- /dev/null
+++ b/lib/ansible/executor/stats.py
@@ -0,0 +1,100 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections.abc import MutableMapping
+
+from ansible.utils.vars import merge_hash
+
+
+class AggregateStats:
+ ''' holds stats about per-host activity during playbook runs '''
+
+ def __init__(self):
+
+ self.processed = {}
+ self.failures = {}
+ self.ok = {}
+ self.dark = {}
+ self.changed = {}
+ self.skipped = {}
+ self.rescued = {}
+ self.ignored = {}
+
+ # user defined stats, which can be per host or global
+ self.custom = {}
+
+ def increment(self, what, host):
+ ''' helper function to bump a statistic '''
+
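+ # 'processed' doubles as a set of every host that has reported any activity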
+ self.processed[host] = 1
+ prev = (getattr(self, what)).get(host, 0)
+ getattr(self, what)[host] = prev + 1
+
+ def decrement(self, what, host):
+ _what = getattr(self, what)
+ try:
+ if _what[host] - 1 < 0:
+ # This should never happen, but let's be safe
+ raise KeyError("Don't be so negative")
+ _what[host] -= 1
+ except KeyError:
+ _what[host] = 0
+
+ def summarize(self, host):
+ ''' return information about a particular host '''
+
+ return dict(
+ ok=self.ok.get(host, 0),
+ failures=self.failures.get(host, 0),
+ unreachable=self.dark.get(host, 0),
+ changed=self.changed.get(host, 0),
+ skipped=self.skipped.get(host, 0),
+ rescued=self.rescued.get(host, 0),
+ ignored=self.ignored.get(host, 0),
+ )
+
+ def set_custom_stats(self, which, what, host=None):
+ ''' allow setting of a custom stat'''
+
+ if host is None:
+ host = '_run'
+ if host not in self.custom:
+ self.custom[host] = {which: what}
+ else:
+ self.custom[host][which] = what
+
+ def update_custom_stats(self, which, what, host=None):
+ ''' allow aggregation of a custom stat'''
+
+ if host is None:
+ host = '_run'
+ if host not in self.custom or which not in self.custom[host]:
+ return self.set_custom_stats(which, what, host)
+
+ # mismatching types
+ if not isinstance(what, type(self.custom[host][which])):
+ return None
+
+ if isinstance(what, MutableMapping):
+ self.custom[host][which] = merge_hash(self.custom[host][which], what)
+ else:
+ # let overloaded + take care of other types
+ self.custom[host][which] += what
diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
new file mode 100644
index 0000000..02ace8f
--- /dev/null
+++ b/lib/ansible/executor/task_executor.py
@@ -0,0 +1,1239 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import pty
+import time
+import json
+import signal
+import subprocess
+import sys
+import termios
+import traceback
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure, AnsibleActionFail, AnsibleActionSkip
+from ansible.executor.task_result import TaskResult
+from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import binary_type
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.connection import write_to_file_descriptor
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.task import Task
+from ansible.plugins import get_plugin_class
+from ansible.plugins.loader import become_loader, cliconf_loader, connection_loader, httpapi_loader, netconf_loader, terminal_loader
+from ansible.template import Templar
+from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
+from ansible.utils.listify import listify_lookup_plugin_terms
+from ansible.utils.unsafe_proxy import to_unsafe_text, wrap_var
+from ansible.vars.clean import namespace_facts, clean_facts
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars, isidentifier
+
+display = Display()
+
+
+RETURN_VARS = [x for x in C.MAGIC_VARIABLE_MAPPING.items() if 'become' not in x and '_pass' not in x]
+
+__all__ = ['TaskExecutor']
+
+
+class TaskTimeoutError(BaseException):
+ pass
+
+
+def task_timeout(signum, frame):
+ raise TaskTimeoutError
+
+
+def remove_omit(task_args, omit_token):
+ '''
+ Remove args whose value equals the ``omit_token``, recursing into
+ nested dicts and lists now that the argument_spec supports suboptions
+ '''
+
+ if not isinstance(task_args, dict):
+ return task_args
+
+ new_args = {}
+ for i in task_args.items():
+ if i[1] == omit_token:
+ continue
+ elif isinstance(i[1], dict):
+ new_args[i[0]] = remove_omit(i[1], omit_token)
+ elif isinstance(i[1], list):
+ new_args[i[0]] = [remove_omit(v, omit_token) for v in i[1]]
+ else:
+ new_args[i[0]] = i[1]
+
+ return new_args
+
+
+class TaskExecutor:
+
+ '''
+ This is the main worker class for the executor pipeline, which
+ handles loading an action plugin to actually dispatch the task to
+ a given host. This class roughly corresponds to the old Runner()
+ class.
+ '''
+
+ def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, final_q):
+ self._host = host
+ self._task = task
+ self._job_vars = job_vars
+ self._play_context = play_context
+ self._new_stdin = new_stdin
+ self._loader = loader
+ self._shared_loader_obj = shared_loader_obj
+ self._connection = None
+ self._final_q = final_q
+ self._loop_eval_error = None
+
+ self._task.squash()
+
+ def run(self):
+ '''
+ The main executor entrypoint, where we determine if the specified
+ task requires looping and either runs the task with self._run_loop()
+ or self._execute(). After that, the returned results are parsed and
+ returned as a dict.
+ '''
+
+ display.debug("in run() - task %s" % self._task._uuid)
+
+ try:
+ try:
+ items = self._get_loop_items()
+ except AnsibleUndefinedVariable as e:
+ # save the error raised here for use later
+ items = None
+ self._loop_eval_error = e
+
+ if items is not None:
+ if len(items) > 0:
+ item_results = self._run_loop(items)
+
+ # create the overall result item
+ res = dict(results=item_results)
+
+ # loop through the item results and set the global changed/failed/skipped result flags based on any item.
+ res['skipped'] = True
+ for item in item_results:
+ if 'changed' in item and item['changed'] and not res.get('changed'):
+ res['changed'] = True
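+ # any single non-skipped item means the overall result is not skipped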
+ if res['skipped'] and ('skipped' not in item or ('skipped' in item and not item['skipped'])):
+ res['skipped'] = False
+ if 'failed' in item and item['failed']:
+ item_ignore = item.pop('_ansible_ignore_errors')
+ if not res.get('failed'):
+ res['failed'] = True
+ res['msg'] = 'One or more items failed'
+ self._task.ignore_errors = item_ignore
+ elif self._task.ignore_errors and not item_ignore:
+ self._task.ignore_errors = item_ignore
+
+ # ensure to accumulate these
+ for array in ['warnings', 'deprecations']:
+ if array in item and item[array]:
+ if array not in res:
+ res[array] = []
+ if not isinstance(item[array], list):
+ item[array] = [item[array]]
+ res[array] = res[array] + item[array]
+ del item[array]
+
+ if not res.get('failed', False):
+ res['msg'] = 'All items completed'
+ if res['skipped']:
+ res['msg'] = 'All items skipped'
+ else:
+ res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
+ else:
+ display.debug("calling self._execute()")
+ res = self._execute()
+ display.debug("_execute() done")
+
+ # make sure changed is set in the result, if it's not present
+ if 'changed' not in res:
+ res['changed'] = False
+
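+ # recursively convert bytes in the result to (unsafe) text so the
+ # result can be serialized safely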
+ def _clean_res(res, errors='surrogate_or_strict'):
+ if isinstance(res, binary_type):
+ return to_unsafe_text(res, errors=errors)
+ elif isinstance(res, dict):
+ for k in res:
+ try:
+ res[k] = _clean_res(res[k], errors=errors)
+ except UnicodeError:
+ if k == 'diff':
+ # If this is a diff, substitute a replacement character if the value
+ # is undecodable as utf8. (Fix #21804)
+ display.warning("We were unable to decode all characters in the module return data."
+ " Replaced some in an effort to return as much as possible")
+ res[k] = _clean_res(res[k], errors='surrogate_then_replace')
+ else:
+ raise
+ elif isinstance(res, list):
+ for idx, item in enumerate(res):
+ res[idx] = _clean_res(item, errors=errors)
+ return res
+
+ display.debug("dumping result to json")
+ res = _clean_res(res)
+ display.debug("done dumping result, returning")
+ return res
+ except AnsibleError as e:
+ return dict(failed=True, msg=wrap_var(to_text(e, nonstring='simplerepr')), _ansible_no_log=self._play_context.no_log)
+ except Exception as e:
+ return dict(failed=True, msg=wrap_var('Unexpected failure during module execution: %s' % (to_native(e, nonstring='simplerepr'))),
+ exception=to_text(traceback.format_exc()), stdout='', _ansible_no_log=self._play_context.no_log)
+ finally:
+ try:
+ self._connection.close()
+ except AttributeError:
+ pass
+ except Exception as e:
+ display.debug(u"error closing connection: %s" % to_text(e))
+
+ def _get_loop_items(self):
+ '''
+ Loads a lookup plugin to handle the with_* portion of a task (if specified),
+ and returns the items result.
+ '''
+
+ # get search path for this task to pass to lookup plugins
+ self._job_vars['ansible_search_path'] = self._task.get_search_path()
+
+ # ensure basedir is always in (dwim already searches here but we need to display it)
+ if self._loader.get_basedir() not in self._job_vars['ansible_search_path']:
+ self._job_vars['ansible_search_path'].append(self._loader.get_basedir())
+
+ templar = Templar(loader=self._loader, variables=self._job_vars)
+ items = None
+ loop_cache = self._job_vars.get('_ansible_loop_cache')
+ if loop_cache is not None:
+ # _ansible_loop_cache may be set in `get_vars` when calculating `delegate_to`
+ # to avoid reprocessing the loop
+ items = loop_cache
+ elif self._task.loop_with:
+ if self._task.loop_with in self._shared_loader_obj.lookup_loader:
+ fail = True
+ if self._task.loop_with == 'first_found':
+ # first_found loops are special. If the item is undefined then we want to fall through to the next value rather than failing.
+ fail = False
+
+ loop_terms = listify_lookup_plugin_terms(terms=self._task.loop, templar=templar, fail_on_undefined=fail, convert_bare=False)
+ if not fail:
+ loop_terms = [t for t in loop_terms if not templar.is_template(t)]
+
+ # get lookup
+ mylookup = self._shared_loader_obj.lookup_loader.get(self._task.loop_with, loader=self._loader, templar=templar)
+
+ # give lookup task 'context' for subdir (mostly needed for first_found)
+ for subdir in ['template', 'var', 'file']: # TODO: move this to constants?
+ if subdir in self._task.action:
+ break
+ setattr(mylookup, '_subdir', subdir + 's')
+
+ # run lookup
+ items = wrap_var(mylookup.run(terms=loop_terms, variables=self._job_vars, wantlist=True))
+ else:
+ raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop_with)
+
+ elif self._task.loop is not None:
+ items = templar.template(self._task.loop)
+ if not isinstance(items, list):
+ raise AnsibleError(
+ "Invalid data passed to 'loop', it requires a list, got this instead: %s."
+ " Hint: If you passed a list/dict of just one element,"
+ " try adding wantlist=True to your lookup invocation or use q/query instead of lookup." % items
+ )
+
+ return items
+
+ def _run_loop(self, items):
+ '''
+ Runs the task with the loop items specified and collates the result
+ into an array named 'results' which is inserted into the final result
+ along with the item for which the loop ran.
+ '''
+ task_vars = self._job_vars
+ templar = Templar(loader=self._loader, variables=task_vars)
+
+ self._task.loop_control.post_validate(templar=templar)
+
+ loop_var = self._task.loop_control.loop_var
+ index_var = self._task.loop_control.index_var
+ loop_pause = self._task.loop_control.pause
+ extended = self._task.loop_control.extended
+ extended_allitems = self._task.loop_control.extended_allitems
+ # ensure we always have a label
+ label = self._task.loop_control.label or '{{' + loop_var + '}}'
+
+ if loop_var in task_vars:
+ display.warning(u"%s: The loop variable '%s' is already in use. "
+ u"You should set the `loop_var` value in the `loop_control` option for the task"
+ u" to something else to avoid variable collisions and unexpected behavior." % (self._task, loop_var))
+
+ ran_once = False
+ no_log = False
+ items_len = len(items)
+ results = []
+ for item_index, item in enumerate(items):
+ task_vars['ansible_loop_var'] = loop_var
+
+ task_vars[loop_var] = item
+ if index_var:
+ task_vars['ansible_index_var'] = index_var
+ task_vars[index_var] = item_index
+
+ if extended:
+ task_vars['ansible_loop'] = {
+ 'index': item_index + 1,
+ 'index0': item_index,
+ 'first': item_index == 0,
+ 'last': item_index + 1 == items_len,
+ 'length': items_len,
+ 'revindex': items_len - item_index,
+ 'revindex0': items_len - item_index - 1,
+ }
+ if extended_allitems:
+ task_vars['ansible_loop']['allitems'] = items
+ try:
+ task_vars['ansible_loop']['nextitem'] = items[item_index + 1]
+ except IndexError:
+ pass
+ if item_index - 1 >= 0:
+ task_vars['ansible_loop']['previtem'] = items[item_index - 1]
+
+ # Update template vars to reflect current loop iteration
+ templar.available_variables = task_vars
+
+ # pause between loop iterations
+ if loop_pause and ran_once:
+ time.sleep(loop_pause)
+ else:
+ ran_once = True
+
+ try:
+ tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
+ tmp_task._parent = self._task._parent
+ tmp_play_context = self._play_context.copy()
+ except AnsibleParserError as e:
+ results.append(dict(failed=True, msg=to_text(e)))
+ continue
+
+ # now we swap the internal task and play context with their copies,
+ # execute, and swap them back so we can do the next iteration cleanly
+ (self._task, tmp_task) = (tmp_task, self._task)
+ (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
+ res = self._execute(variables=task_vars)
+ task_fields = self._task.dump_attrs()
+ (self._task, tmp_task) = (tmp_task, self._task)
+ (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
+
+ # update 'general no_log' based on specific no_log
+ no_log = no_log or tmp_task.no_log
+
+ # now update the result with the item info, and append the result
+ # to the list of results
+ res[loop_var] = item
+ res['ansible_loop_var'] = loop_var
+ if index_var:
+ res[index_var] = item_index
+ res['ansible_index_var'] = index_var
+ if extended:
+ res['ansible_loop'] = task_vars['ansible_loop']
+
+ res['_ansible_item_result'] = True
+ res['_ansible_ignore_errors'] = task_fields.get('ignore_errors')
+
+ # the label gets templated here, unlike the rest of the loop_control fields, since it depends on the loop_var set above
+ try:
+ res['_ansible_item_label'] = templar.template(label)
+ except AnsibleUndefinedVariable as e:
+ res.update({
+ 'failed': True,
+ 'msg': 'Failed to template loop_control.label: %s' % to_text(e)
+ })
+
+ tr = TaskResult(
+ self._host.name,
+ self._task._uuid,
+ res,
+ task_fields=task_fields,
+ )
+ if tr.is_failed() or tr.is_unreachable():
+ self._final_q.send_callback('v2_runner_item_on_failed', tr)
+ elif tr.is_skipped():
+ self._final_q.send_callback('v2_runner_item_on_skipped', tr)
+ else:
+ if getattr(self._task, 'diff', False):
+ self._final_q.send_callback('v2_on_file_diff', tr)
+ if self._task.action not in C._ACTION_INVENTORY_TASKS:
+ self._final_q.send_callback('v2_runner_item_on_ok', tr)
+
+ results.append(res)
+ del task_vars[loop_var]
+
+ # clear 'connection related' plugin variables for next iteration
+ if self._connection:
+ clear_plugins = {
+ 'connection': self._connection._load_name,
+ 'shell': self._connection._shell._load_name
+ }
+ if self._connection.become:
+ clear_plugins['become'] = self._connection.become._load_name
+
+ for plugin_type, plugin_name in clear_plugins.items():
+ for var in C.config.get_plugin_vars(plugin_type, plugin_name):
+ if var in task_vars and var not in self._job_vars:
+ del task_vars[var]
+
+ self._task.no_log = no_log
+
+ return results
+
+ def _execute(self, variables=None):
+ '''
+ The primary workhorse of the executor system, this runs the task
+ on the specified host (which may be the delegated_to host) and handles
+ the retry/until and block rescue/always execution
+ '''
+
+ if variables is None:
+ variables = self._job_vars
+
+ templar = Templar(loader=self._loader, variables=variables)
+
+ context_validation_error = None
+
+ # work on a copy of the variables so the magic vars added below don't pollute the original; a certain subset of variables exist.
+ tempvars = variables.copy()
+
+ try:
+ # TODO: remove play_context as this does not take delegation nor loops correctly into account,
+ # the task itself should hold the correct values for connection/shell/become/terminal plugin options to finalize.
+ # Kept for now for backwards compatibility and a few functions that are still exclusive to it.
+
+ # apply the given task's information to the connection info,
+ # which may override some fields already set by the play or
+ # the options specified on the command line
+ self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)
+
+ # fields set from the play/task may be based on variables, so we have to
+ # do the same kind of post validation step on it here before we use it.
+ self._play_context.post_validate(templar=templar)
+
+ # now that the play context is finalized, if the remote_addr is not set
+ # default to using the host's address field as the remote address
+ if not self._play_context.remote_addr:
+ self._play_context.remote_addr = self._host.address
+
+ # We also add "magic" variables back into the variables dict to make sure
+ self._play_context.update_vars(tempvars)
+
+ except AnsibleError as e:
+ # save the error, which we'll raise later if we don't end up
+ # skipping this task during the conditional evaluation step
+ context_validation_error = e
+
+ no_log = self._play_context.no_log
+
+ # Evaluate the conditional (if any) for this task, which we do before running
+ # the final task post-validation. We do this before the post validation due to
+ # the fact that the conditional may specify that the task be skipped due to a
+ # variable not being present which would otherwise cause validation to fail
+ try:
+ if not self._task.evaluate_conditional(templar, tempvars):
+ display.debug("when evaluation is False, skipping this task")
+ return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=no_log)
+ except AnsibleError as e:
+ # loop error takes precedence
+ if self._loop_eval_error is not None:
+ # Display the error from the conditional as well to prevent
+ # losing information useful for debugging.
+ display.v(to_text(e))
+ raise self._loop_eval_error # pylint: disable=raising-bad-type
+ raise
+
+ # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
+ if self._loop_eval_error is not None:
+ raise self._loop_eval_error # pylint: disable=raising-bad-type
+
+ # if we ran into an error while setting up the PlayContext, raise it now, unless it is a known issue with delegation
+ # and undefined vars (the correct values land in cvars later on; if the connection plugins still error, it blows up there)
+ if context_validation_error is not None:
+ raiseit = True
+ if self._task.delegate_to:
+ if isinstance(context_validation_error, AnsibleUndefinedVariable):
+ raiseit = False
+ elif isinstance(context_validation_error, AnsibleParserError):
+ # parser error, might be caused by an undefined variable too
+ orig_exc = getattr(context_validation_error, 'orig_exc', None)
+ if isinstance(orig_exc, AnsibleUndefinedVariable):
+ raiseit = False
+ if raiseit:
+ raise context_validation_error # pylint: disable=raising-bad-type
+
+ # set templar to use temp variables until loop is evaluated
+ templar.available_variables = tempvars
+
+ # if this task is a TaskInclude, we just return now with a success code so the
+ # main thread can expand the task list for the given host
+ if self._task.action in C._ACTION_ALL_INCLUDE_TASKS:
+ include_args = self._task.args.copy()
+ include_file = include_args.pop('_raw_params', None)
+ if not include_file:
+ return dict(failed=True, msg="No include file was specified to the include")
+
+ include_file = templar.template(include_file)
+ return dict(include=include_file, include_args=include_args)
+
+ # if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
+ elif self._task.action in C._ACTION_INCLUDE_ROLE:
+ include_args = self._task.args.copy()
+ return dict(include_args=include_args)
+
+ # Now we do final validation on the task, which sets all fields to their final values.
+ try:
+ self._task.post_validate(templar=templar)
+ except AnsibleError:
+ raise
+ except Exception:
+ return dict(changed=False, failed=True, _ansible_no_log=no_log, exception=to_text(traceback.format_exc()))
+ if '_variable_params' in self._task.args:
+ variable_params = self._task.args.pop('_variable_params')
+ if isinstance(variable_params, dict):
+ if C.INJECT_FACTS_AS_VARS:
+ display.warning("Using a variable for a task's 'args' is unsafe in some situations "
+ "(see https://docs.ansible.com/ansible/devel/reference_appendices/faq.html#argsplat-unsafe)")
+ variable_params.update(self._task.args)
+ self._task.args = variable_params
+ else:
+ # if we didn't get a dict, it means there's garbage remaining after k=v parsing, just give up
+ # see https://github.com/ansible/ansible/issues/79862
+ raise AnsibleError(f"invalid or malformed argument: '{variable_params}'")
+
+ # update no_log to task value, now that we have it templated
+ no_log = self._task.no_log
+
+ # free tempvars up, not used anymore, cvars and vars_copy should be mainly used after this point
+ # updating the original 'variables' at the end
+ tempvars = {}
+
+ # setup cvars copy, used for all connection related templating
+ if self._task.delegate_to:
+ # use vars from delegated host (which already include task vars) instead of original host
+ cvars = variables.get('ansible_delegated_vars', {}).get(self._task.delegate_to, {})
+ else:
+ # just use normal host vars
+ cvars = variables
+
+ templar.available_variables = cvars
+
+ # use the magic var if it exists; if not, let task inheritance do its thing.
+ if cvars.get('ansible_connection') is not None:
+ current_connection = templar.template(cvars['ansible_connection'])
+ else:
+ current_connection = self._task.connection
+
+ # get the connection and the handler for this execution
+ if (not self._connection or
+ not getattr(self._connection, 'connected', False) or
+ not self._connection.matches_name([current_connection]) or
+ # pc compare, left here for old plugins, but should be irrelevant for those
+ # using get_option, since they are cleared each iteration.
+ self._play_context.remote_addr != self._connection._play_context.remote_addr):
+ self._connection = self._get_connection(cvars, templar, current_connection)
+ else:
+ # if connection is reused, its _play_context is no longer valid and needs
+ # to be replaced with the one templated above, in case other data changed
+ self._connection._play_context = self._play_context
+ self._set_become_plugin(cvars, templar, self._connection)
+
+ plugin_vars = self._set_connection_options(cvars, templar)
+
+ # make a copy of the job vars here, as we update them here and later,
+ # but don't want to pollute original
+ vars_copy = variables.copy()
+ # update with connection info (i.e ansible_host/ansible_user)
+ self._connection.update_vars(vars_copy)
+ templar.available_variables = vars_copy
+
+ # TODO: eventually remove as pc is taken out of the resolution path
+ # feed back into pc to ensure plugins not using get_option can get correct value
+ self._connection._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=vars_copy, templar=templar)
+
+ # for persistent connections, initialize socket path and start connection manager
+ if any(((self._connection.supports_persistence and C.USE_PERSISTENT_CONNECTIONS), self._connection.force_persistence)):
+ self._play_context.timeout = self._connection.get_option('persistent_command_timeout')
+ display.vvvv('attempting to start connection', host=self._play_context.remote_addr)
+ display.vvvv('using connection plugin %s' % self._connection.transport, host=self._play_context.remote_addr)
+
+ options = self._connection.get_options()
+ socket_path = start_connection(self._play_context, options, self._task._uuid)
+ display.vvvv('local domain socket path is %s' % socket_path, host=self._play_context.remote_addr)
+ setattr(self._connection, '_socket_path', socket_path)
+
+ # TODO: eventually remove this block as this should be a 'consequence' of 'forced_local' modules
+ # special handling for python interpreter for network_os, default to ansible python unless overridden
+ if 'ansible_network_os' in cvars and 'ansible_python_interpreter' not in cvars:
+ # this also avoids 'python discovery'
+ cvars['ansible_python_interpreter'] = sys.executable
+
+ # get handler
+ self._handler, module_context = self._get_action_handler_with_module_context(connection=self._connection, templar=templar)
+
+ if module_context is not None:
+ module_defaults_fqcn = module_context.resolved_fqcn
+ else:
+ module_defaults_fqcn = self._task.resolved_action
+
+ # Apply default params for action/module, if present
+ self._task.args = get_action_args_with_defaults(
+ module_defaults_fqcn, self._task.args, self._task.module_defaults, templar,
+ action_groups=self._task._parent._play._action_groups
+ )
+
+ # And filter out any fields which were set to default(omit), and got the omit token value
+ omit_token = variables.get('omit')
+ if omit_token is not None:
+ self._task.args = remove_omit(self._task.args, omit_token)
+
+ # Read some values from the task, so that we can modify them if need be
+ if self._task.until:
+ retries = self._task.retries
+ if retries is None:
+ retries = 3
+ elif retries <= 0:
+ retries = 1
+ else:
+ retries += 1
+ else:
+ retries = 1
+
+ delay = self._task.delay
+ if delay < 0:
+ delay = 1
+
+ display.debug("starting attempt loop")
+ result = None
+ for attempt in range(1, retries + 1):
+ display.debug("running the handler")
+ try:
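+ # the timeout is enforced via SIGALRM, which can only be installed
+ # in the main thread of this worker process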
+ if self._task.timeout:
+ old_sig = signal.signal(signal.SIGALRM, task_timeout)
+ signal.alarm(self._task.timeout)
+ result = self._handler.run(task_vars=vars_copy)
+ except (AnsibleActionFail, AnsibleActionSkip) as e:
+ return e.result
+ except AnsibleConnectionFailure as e:
+ return dict(unreachable=True, msg=to_text(e))
+ except TaskTimeoutError as e:
+ msg = 'The %s action failed to execute in the expected time frame (%d) and was terminated' % (self._task.action, self._task.timeout)
+ return dict(failed=True, msg=msg)
+ finally:
+ if self._task.timeout:
+ signal.alarm(0)
+ old_sig = signal.signal(signal.SIGALRM, old_sig)
+ self._handler.cleanup()
+ display.debug("handler run complete")
+
+ # preserve no log
+ result["_ansible_no_log"] = no_log
+
+ if self._task.action not in C._ACTION_WITH_CLEAN_FACTS:
+ result = wrap_var(result)
+
+ # update the local copy of vars with the registered value, if specified,
+ # or any facts which may have been generated by the module execution
+ if self._task.register:
+ if not isidentifier(self._task.register):
+ raise AnsibleError("Invalid variable name in 'register' specified: '%s'" % self._task.register)
+
+ vars_copy[self._task.register] = result
+
+ if self._task.async_val > 0:
+ if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
+ result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
+ if result.get('failed'):
+ self._final_q.send_callback(
+ 'v2_runner_on_async_failed',
+ TaskResult(self._host.name,
+ self._task._uuid,
+ result,
+ task_fields=self._task.dump_attrs()))
+ else:
+ self._final_q.send_callback(
+ 'v2_runner_on_async_ok',
+ TaskResult(self._host.name,
+ self._task._uuid,
+ result,
+ task_fields=self._task.dump_attrs()))
+
+ # ensure no log is preserved
+ result["_ansible_no_log"] = no_log
+
+ # helper methods for use below in evaluating changed/failed_when
+ def _evaluate_changed_when_result(result):
+ if self._task.changed_when is not None and self._task.changed_when:
+ cond = Conditional(loader=self._loader)
+ cond.when = self._task.changed_when
+ result['changed'] = cond.evaluate_conditional(templar, vars_copy)
+
+ def _evaluate_failed_when_result(result):
+ if self._task.failed_when:
+ cond = Conditional(loader=self._loader)
+ cond.when = self._task.failed_when
+ failed_when_result = cond.evaluate_conditional(templar, vars_copy)
+ result['failed_when_result'] = result['failed'] = failed_when_result
+ else:
+ failed_when_result = False
+ return failed_when_result
+
+ if 'ansible_facts' in result and self._task.action not in C._ACTION_DEBUG:
+ if self._task.action in C._ACTION_WITH_CLEAN_FACTS:
+ if self._task.delegate_to and self._task.delegate_facts:
+ if '_ansible_delegated_vars' in vars_copy:
+ vars_copy['_ansible_delegated_vars'].update(result['ansible_facts'])
+ else:
+ vars_copy['_ansible_delegated_vars'] = result['ansible_facts']
+ else:
+ vars_copy.update(result['ansible_facts'])
+ else:
+ # TODO: cleaning of facts should eventually become part of taskresults instead of vars
+ af = wrap_var(result['ansible_facts'])
+ vars_copy['ansible_facts'] = combine_vars(vars_copy.get('ansible_facts', {}), namespace_facts(af))
+ if C.INJECT_FACTS_AS_VARS:
+ vars_copy.update(clean_facts(af))
+
+ # set the failed property if it was missing.
+ if 'failed' not in result:
+ # rc is here for backwards compatibility and modules that use it instead of 'failed'
+ if 'rc' in result and result['rc'] not in [0, "0"]:
+ result['failed'] = True
+ else:
+ result['failed'] = False
+
+ # Make attempts and retries available early to allow their use in changed/failed_when
+ if self._task.until:
+ result['attempts'] = attempt
+
+ # set the changed property if it was missing.
+ if 'changed' not in result:
+ result['changed'] = False
+
+ if self._task.action not in C._ACTION_WITH_CLEAN_FACTS:
+ result = wrap_var(result)
+
+ # re-update the local copy of vars with the registered value, if specified,
+ # or any facts which may have been generated by the module execution
+ # This gives changed/failed_when access to additional recently modified
+ # attributes of result
+ if self._task.register:
+ vars_copy[self._task.register] = result
+
+ # if we didn't skip this task, use the helpers to evaluate the changed/
+ # failed_when properties
+ if 'skipped' not in result:
+ condname = 'changed'
+
+ try:
+ _evaluate_changed_when_result(result)
+ condname = 'failed'
+ _evaluate_failed_when_result(result)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['%s_when_result' % condname] = to_text(e)
+
+ if retries > 1:
+ cond = Conditional(loader=self._loader)
+ cond.when = self._task.until
+ if cond.evaluate_conditional(templar, vars_copy):
+ break
+ else:
+ # no conditional check, or it failed, so sleep for the specified time
+ if attempt < retries:
+ result['_ansible_retry'] = True
+ result['retries'] = retries
+ display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
+ self._final_q.send_callback(
+ 'v2_runner_retry',
+ TaskResult(
+ self._host.name,
+ self._task._uuid,
+ result,
+ task_fields=self._task.dump_attrs()
+ )
+ )
+ time.sleep(delay)
+ self._handler = self._get_action_handler(connection=self._connection, templar=templar)
+ else:
+ if retries > 1:
+ # we ran out of attempts, so mark the result as failed
+ result['attempts'] = retries - 1
+ result['failed'] = True
+
+ if self._task.action not in C._ACTION_WITH_CLEAN_FACTS:
+ result = wrap_var(result)
+
+ # do the final update of the local variables here, for both registered
+ # values and any facts which may have been created
+ if self._task.register:
+ variables[self._task.register] = result
+
+ if 'ansible_facts' in result and self._task.action not in C._ACTION_DEBUG:
+ if self._task.action in C._ACTION_WITH_CLEAN_FACTS:
+ variables.update(result['ansible_facts'])
+ else:
+ # TODO: cleaning of facts should eventually become part of taskresults instead of vars
+ af = wrap_var(result['ansible_facts'])
+ variables['ansible_facts'] = combine_vars(variables.get('ansible_facts', {}), namespace_facts(af))
+ if C.INJECT_FACTS_AS_VARS:
+ variables.update(clean_facts(af))
+
+ # save the notification target in the result, if it was specified, as
+ # this task may be running in a loop in which case the notification
+ # may be item-specific, i.e. "notify: service {{item}}"
+ if self._task.notify is not None:
+ result['_ansible_notify'] = self._task.notify
+
+ # add the delegated vars to the result, so we can reference them
+ # on the results side without having to do any further templating
+ # also now add connection vars results when delegating
+ if self._task.delegate_to:
+ result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to}
+ for k in plugin_vars:
+ result["_ansible_delegated_vars"][k] = cvars.get(k)
+
+ # note: here for callbacks that rely on this info to display delegation
+ for requireshed in ('ansible_host', 'ansible_port', 'ansible_user', 'ansible_connection'):
+ if requireshed not in result["_ansible_delegated_vars"] and requireshed in cvars:
+ result["_ansible_delegated_vars"][requireshed] = cvars.get(requireshed)
+
+ # and return
+ display.debug("attempt loop complete, returning result")
+ return result
+
+ def _poll_async_result(self, result, templar, task_vars=None):
+ '''
+ Polls for the specified JID to be complete
+ '''
+
+ if task_vars is None:
+ task_vars = self._job_vars
+
+ async_jid = result.get('ansible_job_id')
+ if async_jid is None:
+ return dict(failed=True, msg="No job id was returned by the async task")
+
+ # Create a new pseudo-task to run the async_status module, and run
+ # that (with a sleep for "poll" seconds between each retry) until the
+ # async time limit is exceeded.
+
+ async_task = Task.load(dict(action='async_status', args={'jid': async_jid}, environment=self._task.environment))
+
+ # FIXME: this is no longer the case; the 'normal' action handler takes care of everything, so see if this can just be generalized
+ # Because this is an async task, the action handler is async. However,
+ # we need the 'normal' action handler for the status check, so get it
+ # now via the action_loader
+ async_handler = self._shared_loader_obj.action_loader.get(
+ 'ansible.legacy.async_status',
+ task=async_task,
+ connection=self._connection,
+ play_context=self._play_context,
+ loader=self._loader,
+ templar=templar,
+ shared_loader_obj=self._shared_loader_obj,
+ )
+
+ time_left = self._task.async_val
+ while time_left > 0:
+ time.sleep(self._task.poll)
+
+ try:
+ async_result = async_handler.run(task_vars=task_vars)
+ # We do not bail out of the loop in cases where the failure
+ # is associated with a parsing error. The async_runner can
+ # have issues which result in a half-written/unparseable result
+ # file on disk, which manifests to the user as a timeout happening
+ # before it's time to timeout.
+ if (int(async_result.get('finished', 0)) == 1 or
+ ('failed' in async_result and async_result.get('_ansible_parsed', False)) or
+ 'skipped' in async_result):
+ break
+ except Exception as e:
+ # Connections can raise exceptions during polling (eg, network bounce, reboot); these should be non-fatal.
+ # On an exception, call the connection's reset method if it has one
+ # (eg, drop/recreate WinRM connection; some reused connections are in a broken state)
+ display.vvvv("Exception during async poll, retrying... (%s)" % to_text(e))
+ display.debug("Async poll exception was:\n%s" % to_text(traceback.format_exc()))
+ try:
+ async_handler._connection.reset()
+ except AttributeError:
+ pass
+
+ # Little hack to raise the exception if we've exhausted the timeout period
+ time_left -= self._task.poll
+ if time_left <= 0:
+ raise
+ else:
+ time_left -= self._task.poll
+ self._final_q.send_callback(
+ 'v2_runner_on_async_poll',
+ TaskResult(
+ self._host.name,
+ async_task._uuid,
+ async_result,
+ task_fields=async_task.dump_attrs(),
+ ),
+ )
+
+ if int(async_result.get('finished', 0)) != 1:
+ if async_result.get('_ansible_parsed'):
+ return dict(failed=True, msg="async task did not complete within the requested time - %ss" % self._task.async_val, async_result=async_result)
+ else:
+ return dict(failed=True, msg="async task produced unparseable results", async_result=async_result)
+ else:
+ # If the async task finished, automatically cleanup the temporary
+ # status file left behind.
+ cleanup_task = Task.load(
+ {
+ 'async_status': {
+ 'jid': async_jid,
+ 'mode': 'cleanup',
+ },
+ 'environment': self._task.environment,
+ }
+ )
+ cleanup_handler = self._shared_loader_obj.action_loader.get(
+ 'ansible.legacy.async_status',
+ task=cleanup_task,
+ connection=self._connection,
+ play_context=self._play_context,
+ loader=self._loader,
+ templar=templar,
+ shared_loader_obj=self._shared_loader_obj,
+ )
+ cleanup_handler.run(task_vars=task_vars)
+ cleanup_handler.cleanup(force=True)
+ async_handler.cleanup(force=True)
+ return async_result
+
+ def _get_become(self, name):
+ become = become_loader.get(name)
+ if not become:
+ raise AnsibleError("Invalid become method specified, could not find matching plugin: '%s'. "
+ "Use `ansible-doc -t become -l` to list available plugins." % name)
+ return become
+
+ def _get_connection(self, cvars, templar, current_connection):
+ '''
+ Reads the connection property for the host, and returns the
+ correct connection object from the list of connection plugins
+ '''
+
+ self._play_context.connection = current_connection
+
+ # TODO: play context has logic to update the connection for 'smart'
+ # (default value, will choose between ssh and paramiko) and 'persistent'
+ # (really paramiko), eventually this should move to task object itself.
+ conn_type = self._play_context.connection
+
+ connection, plugin_load_context = self._shared_loader_obj.connection_loader.get_with_context(
+ conn_type,
+ self._play_context,
+ self._new_stdin,
+ task_uuid=self._task._uuid,
+ ansible_playbook_pid=to_text(os.getppid())
+ )
+
+ if not connection:
+ raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
+
+ self._set_become_plugin(cvars, templar, connection)
+
+ # Also backwards compat call for those still using play_context
+ self._play_context.set_attributes_from_plugin(connection)
+
+ return connection
+
+ def _set_become_plugin(self, cvars, templar, connection):
+ # load become plugin if needed
+ if cvars.get('ansible_become') is not None:
+ become = boolean(templar.template(cvars['ansible_become']))
+ else:
+ become = self._task.become
+
+ if become:
+ if cvars.get('ansible_become_method'):
+ become_plugin = self._get_become(templar.template(cvars['ansible_become_method']))
+ else:
+ become_plugin = self._get_become(self._task.become_method)
+
+ else:
+ # If become is not enabled on the task it needs to be removed from the connection plugin
+ # https://github.com/ansible/ansible/issues/78425
+ become_plugin = None
+
+ try:
+ connection.set_become_plugin(become_plugin)
+ except AttributeError:
+ # Older connection plugin that does not support set_become_plugin
+ pass
+
+ if become_plugin:
+ if getattr(connection.become, 'require_tty', False) and not getattr(connection, 'has_tty', False):
+ raise AnsibleError(
+ "The '%s' connection does not provide a TTY which is required for the selected "
+ "become plugin: %s." % (connection._load_name, become_plugin.name)
+ )
+
+ # Backwards compat for connection plugins that don't support become plugins
+ # Just do this unconditionally for now, we could move it inside of the
+ # AttributeError above later
+ self._play_context.set_become_plugin(become_plugin.name)
+
+ def _set_plugin_options(self, plugin_type, variables, templar, task_keys):
+ try:
+ plugin = getattr(self._connection, '_%s' % plugin_type)
+ except AttributeError:
+ # Some plugins are assigned to private attrs, ``become`` is not
+ plugin = getattr(self._connection, plugin_type)
+
+ # network_cli's "real" connection plugin is not named connection
+ # to avoid the confusion of having connection.connection
+ if plugin_type == "ssh_type_conn":
+ plugin_type = "connection"
+ option_vars = C.config.get_plugin_vars(plugin_type, plugin._load_name)
+ options = {}
+ for k in option_vars:
+ if k in variables:
+ options[k] = templar.template(variables[k])
+ # TODO move to task method?
+ plugin.set_options(task_keys=task_keys, var_options=options)
+
+ return option_vars
+
+ def _set_connection_options(self, variables, templar):
+
+ # keep list of variable names possibly consumed
+ varnames = []
+
+ # grab list of usable vars for this plugin
+ option_vars = C.config.get_plugin_vars('connection', self._connection._load_name)
+ varnames.extend(option_vars)
+
+ # create dict of 'templated vars'
+ options = {'_extras': {}}
+ for k in option_vars:
+ if k in variables:
+ options[k] = templar.template(variables[k])
+
+ # add extras if plugin supports them
+ if getattr(self._connection, 'allow_extras', False):
+ for k in variables:
+ if k.startswith('ansible_%s_' % self._connection._load_name) and k not in options:
+ options['_extras'][k] = templar.template(variables[k])
+
+ task_keys = self._task.dump_attrs()
+
+ # The task_keys 'timeout' attr is the task's timeout, not the connection timeout.
+ # The connection timeout is threaded through the play_context for now.
+ task_keys['timeout'] = self._play_context.timeout
+
+ if self._play_context.password:
+ # The connection password is threaded through the play_context for
+ # now. This is something we ultimately want to avoid, but the first
+ # step is to get connection plugins pulling the password through the
+ # config system instead of directly accessing play_context.
+ task_keys['password'] = self._play_context.password
+
+ # Prevent task retries from overriding connection retries
+ del task_keys['retries']
+
+ # set options with 'templated vars' specific to this plugin and dependent ones
+ self._connection.set_options(task_keys=task_keys, var_options=options)
+ varnames.extend(self._set_plugin_options('shell', variables, templar, task_keys))
+
+ if self._connection.become is not None:
+ if self._play_context.become_pass:
+ # FIXME: eventually remove this from task and play_context; it is here for backwards compat.
+ # Keep it out of play objects to avoid accidental disclosure; only the become plugin should have it.
+ # The become pass is already in the play_context if given on
+ # the CLI (-K). Make the plugin aware of it in this case.
+ task_keys['become_pass'] = self._play_context.become_pass
+
+ varnames.extend(self._set_plugin_options('become', variables, templar, task_keys))
+
+ # FOR BACKWARDS COMPAT:
+ for option in ('become_user', 'become_flags', 'become_exe', 'become_pass'):
+ try:
+ setattr(self._play_context, option, self._connection.become.get_option(option))
+ except KeyError:
+ pass # some plugins don't support all base flags
+ self._play_context.prompt = self._connection.become.prompt
+
+ # deals with networking sub_plugins (network_cli/httpapi/netconf)
+ sub = getattr(self._connection, '_sub_plugin', None)
+ if sub is not None and sub.get('type') != 'external':
+ plugin_type = get_plugin_class(sub.get("obj"))
+ varnames.extend(self._set_plugin_options(plugin_type, variables, templar, task_keys))
+ sub_conn = getattr(self._connection, 'ssh_type_conn', None)
+ if sub_conn is not None:
+ varnames.extend(self._set_plugin_options("ssh_type_conn", variables, templar, task_keys))
+
+ return varnames
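+
+ # For illustration (hedged, not part of the original code): a hostvar such
+ # as ansible_ssh_pipelining appears in get_plugin_vars('connection', 'ssh'),
+ # so its templated value is passed to connection.set_options() and the
+ # variable name is reported back in varnames as "possibly consumed".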
+
+ def _get_action_handler(self, connection, templar):
+ '''
+ Returns the correct action plugin to handle the requested task action
+ '''
+ return self._get_action_handler_with_module_context(connection, templar)[0]
+
+ def _get_action_handler_with_module_context(self, connection, templar):
+ '''
+ Returns the correct action plugin to handle the requested task action and the module context
+ '''
+ module_collection, separator, module_name = self._task.action.rpartition(".")
+ module_prefix = module_name.split('_')[0]
+ if module_collection:
+ # For network modules, which look for one action plugin per platform, look for the
+ # action plugin in the same collection as the module by prefixing the action plugin
+ # with the same collection.
+ network_action = "{0}.{1}".format(module_collection, module_prefix)
+ else:
+ network_action = module_prefix
+
+ collections = self._task.collections
+
+ # Check if the module has specified an action handler
+ module = self._shared_loader_obj.module_loader.find_plugin_with_context(
+ self._task.action, collection_list=collections
+ )
+ if not module.resolved or not module.action_plugin:
+ module = None
+ if module is not None:
+ handler_name = module.action_plugin
+ # let action plugin override module, fallback to 'normal' action plugin otherwise
+ elif self._shared_loader_obj.action_loader.has_plugin(self._task.action, collection_list=collections):
+ handler_name = self._task.action
+ elif all((module_prefix in C.NETWORK_GROUP_MODULES, self._shared_loader_obj.action_loader.has_plugin(network_action, collection_list=collections))):
+ handler_name = network_action
+ display.vvvv("Using network group action {handler} for {action}".format(handler=handler_name,
+ action=self._task.action),
+ host=self._play_context.remote_addr)
+ else:
+ # use ansible.legacy.normal to allow (historic) local action_plugins/ override without collections search
+ handler_name = 'ansible.legacy.normal'
+ collections = None # until then, we don't want the task's collection list to be consulted; use the builtin
+
+ handler = self._shared_loader_obj.action_loader.get(
+ handler_name,
+ task=self._task,
+ connection=connection,
+ play_context=self._play_context,
+ loader=self._loader,
+ templar=templar,
+ shared_loader_obj=self._shared_loader_obj,
+ collection_list=collections
+ )
+
+ if not handler:
+ raise AnsibleError("the handler '%s' was not found" % handler_name)
+
+ return handler, module
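+
+# Condensed reference (illustrative) for the resolution order implemented in
+# _get_action_handler_with_module_context() above:
+#   1. the action plugin declared by the module itself (module.action_plugin)
+#   2. an action plugin matching the task's action name
+#   3. the per-platform network action for NETWORK_GROUP_MODULES
+#   4. the generic 'ansible.legacy.normal' fallback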
+
+
+def start_connection(play_context, options, task_uuid):
+ '''
+ Starts the persistent connection
+ '''
+ candidate_paths = [C.ANSIBLE_CONNECTION_PATH or os.path.dirname(sys.argv[0])]
+ candidate_paths.extend(os.environ.get('PATH', '').split(os.pathsep))
+ for dirname in candidate_paths:
+ ansible_connection = os.path.join(dirname, 'ansible-connection')
+ if os.path.isfile(ansible_connection):
+ display.vvvv("Found ansible-connection at path {0}".format(ansible_connection))
+ break
+ else:
+ raise AnsibleError("Unable to find location of 'ansible-connection'. "
+ "Please set or check the value of ANSIBLE_CONNECTION_PATH")
+
+ env = os.environ.copy()
+ env.update({
+ # HACK: most of these paths may change during the controller's lifetime
+ # (eg, due to late dynamic role includes or multi-playbook execution); without
+ # a way to invalidate/update them, ansible-connection won't always see the
+ # same plugins the controller can.
+ 'ANSIBLE_BECOME_PLUGINS': become_loader.print_paths(),
+ 'ANSIBLE_CLICONF_PLUGINS': cliconf_loader.print_paths(),
+ 'ANSIBLE_COLLECTIONS_PATH': to_native(os.pathsep.join(AnsibleCollectionConfig.collection_paths)),
+ 'ANSIBLE_CONNECTION_PLUGINS': connection_loader.print_paths(),
+ 'ANSIBLE_HTTPAPI_PLUGINS': httpapi_loader.print_paths(),
+ 'ANSIBLE_NETCONF_PLUGINS': netconf_loader.print_paths(),
+ 'ANSIBLE_TERMINAL_PLUGINS': terminal_loader.print_paths(),
+ })
+ verbosity = []
+ if display.verbosity:
+ verbosity.append('-%s' % ('v' * display.verbosity))
+ python = sys.executable
+ master, slave = pty.openpty()
+ p = subprocess.Popen(
+ [python, ansible_connection, *verbosity, to_text(os.getppid()), to_text(task_uuid)],
+ stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
+ )
+ os.close(slave)
+
+ # We need to set the pty into noncanonical mode. This ensures that we
+ # can receive lines longer than 4095 characters (plus newline) without
+ # truncating.
+ old = termios.tcgetattr(master)
+ new = termios.tcgetattr(master)
+ new[3] = new[3] & ~termios.ICANON
+
+ try:
+ termios.tcsetattr(master, termios.TCSANOW, new)
+ write_to_file_descriptor(master, options)
+ write_to_file_descriptor(master, play_context.serialize())
+
+ (stdout, stderr) = p.communicate()
+ finally:
+ termios.tcsetattr(master, termios.TCSANOW, old)
+ os.close(master)
+
+ if p.returncode == 0:
+ result = json.loads(to_text(stdout, errors='surrogate_then_replace'))
+ else:
+ try:
+ result = json.loads(to_text(stderr, errors='surrogate_then_replace'))
+ except getattr(json.decoder, 'JSONDecodeError', ValueError):
+ # JSONDecodeError only available on Python 3.5+
+ result = {'error': to_text(stderr, errors='surrogate_then_replace')}
+
+ if 'messages' in result:
+ for level, message in result['messages']:
+ if level == 'log':
+ display.display(message, log_only=True)
+ elif level in ('debug', 'v', 'vv', 'vvv', 'vvvv', 'vvvvv', 'vvvvvv'):
+ getattr(display, level)(message, host=play_context.remote_addr)
+ else:
+ if hasattr(display, level):
+ getattr(display, level)(message)
+ else:
+ display.vvvv(message, host=play_context.remote_addr)
+
+ if 'error' in result:
+ if display.verbosity > 2:
+ if result.get('exception'):
+ msg = "The full traceback is:\n" + result['exception']
+ display.display(msg, color=C.COLOR_ERROR)
+ raise AnsibleError(result['error'])
+
+ return result['socket_path']
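+
+
+# A minimal standalone sketch of the noncanonical-pty pattern used by
+# start_connection() above; the helper name is illustrative only.
+def _example_noncanonical_pty():
+    import os
+    import pty
+    import termios
+
+    master, slave = pty.openpty()
+    attrs = termios.tcgetattr(master)
+    attrs[3] &= ~termios.ICANON  # index 3 holds the local mode flags (lflag)
+    termios.tcsetattr(master, termios.TCSANOW, attrs)
+    # a single read on the master can now return lines longer than the
+    # 4095-byte canonical line buffer without truncation
+    os.close(slave)
+    os.close(master)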
diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py
new file mode 100644
index 0000000..dcfc38a
--- /dev/null
+++ b/lib/ansible/executor/task_queue_manager.py
@@ -0,0 +1,456 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import tempfile
+import threading
+import time
+import multiprocessing.queues
+
+from ansible import constants as C
+from ansible import context
+from ansible.errors import AnsibleError
+from ansible.executor.play_iterator import PlayIterator
+from ansible.executor.stats import AggregateStats
+from ansible.executor.task_result import TaskResult
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_native
+from ansible.playbook.play_context import PlayContext
+from ansible.playbook.task import Task
+from ansible.plugins.loader import callback_loader, strategy_loader, module_loader
+from ansible.plugins.callback import CallbackBase
+from ansible.template import Templar
+from ansible.vars.hostvars import HostVars
+from ansible.vars.reserved import warn_if_reserved
+from ansible.utils.display import Display
+from ansible.utils.lock import lock_decorator
+from ansible.utils.multiprocessing import context as multiprocessing_context
+
+
+__all__ = ['TaskQueueManager']
+
+display = Display()
+
+
+class CallbackSend:
+ def __init__(self, method_name, *args, **kwargs):
+ self.method_name = method_name
+ self.args = args
+ self.kwargs = kwargs
+
+
+class DisplaySend:
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+
+class FinalQueue(multiprocessing.queues.Queue):
+ def __init__(self, *args, **kwargs):
+ kwargs['ctx'] = multiprocessing_context
+ super(FinalQueue, self).__init__(*args, **kwargs)
+
+ def send_callback(self, method_name, *args, **kwargs):
+ self.put(
+ CallbackSend(method_name, *args, **kwargs),
+ block=False
+ )
+
+ def send_task_result(self, *args, **kwargs):
+ if isinstance(args[0], TaskResult):
+ tr = args[0]
+ else:
+ tr = TaskResult(*args, **kwargs)
+ self.put(
+ tr,
+ block=False
+ )
+
+ def send_display(self, *args, **kwargs):
+ self.put(
+ DisplaySend(*args, **kwargs),
+ block=False
+ )
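+
+
+def _example_final_queue_roundtrip():
+    # A minimal sketch (illustrative only, not strategy code) of the
+    # producer/consumer contract: workers enqueue display messages and task
+    # results, and the controller drains the queue and dispatches them.
+    q = FinalQueue()
+    q.send_display('ok: [host1]')  # worker side
+    item = q.get()                 # controller side; blocks until delivered
+    if isinstance(item, DisplaySend):
+        display.display(*item.args, **item.kwargs)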
+
+
+class AnsibleEndPlay(Exception):
+ def __init__(self, result):
+ self.result = result
+
+
+class TaskQueueManager:
+
+ '''
+ This class handles the multiprocessing requirements of Ansible by
+ creating a pool of worker forks, a result handler fork, and a
+ manager object with shared data structures/queues for coordinating
+ work between all processes.
+
+ The queue manager is responsible for loading the play strategy plugin,
+ which dispatches the Play's tasks to hosts.
+ '''
+
+ RUN_OK = 0
+ RUN_ERROR = 1
+ RUN_FAILED_HOSTS = 2
+ RUN_UNREACHABLE_HOSTS = 4
+ RUN_FAILED_BREAK_PLAY = 8
+ RUN_UNKNOWN_ERROR = 255
+
+ def __init__(self, inventory, variable_manager, loader, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False, forks=None):
+
+ self._inventory = inventory
+ self._variable_manager = variable_manager
+ self._loader = loader
+ self._stats = AggregateStats()
+ self.passwords = passwords
+ self._stdout_callback = stdout_callback
+ self._run_additional_callbacks = run_additional_callbacks
+ self._run_tree = run_tree
+ self._forks = forks or 5
+
+ self._callbacks_loaded = False
+ self._callback_plugins = []
+ self._start_at_done = False
+
+ # make sure any module paths (if specified) are added to the module_loader
+ if context.CLIARGS.get('module_path', False):
+ for path in context.CLIARGS['module_path']:
+ if path:
+ module_loader.add_directory(path)
+
+ # a special flag to help us exit cleanly
+ self._terminated = False
+
+ # dictionaries to keep track of failed/unreachable hosts
+ self._failed_hosts = dict()
+ self._unreachable_hosts = dict()
+
+ try:
+ self._final_q = FinalQueue()
+ except OSError as e:
+ raise AnsibleError("Unable to use multiprocessing, this is normally caused by lack of access to /dev/shm: %s" % to_native(e))
+
+ self._callback_lock = threading.Lock()
+
+ # A temporary file (opened pre-fork) used by connection
+ # plugins for inter-process locking.
+ self._connection_lockfile = tempfile.TemporaryFile()
+
+ def _initialize_processes(self, num):
+ self._workers = []
+
+ for i in range(num):
+ self._workers.append(None)
+
+ def load_callbacks(self):
+ '''
+ Loads all available callbacks. Plugins that utilize the CALLBACK_TYPE
+ option are treated specially: when CALLBACK_TYPE is set to 'stdout',
+ only one such callback plugin will be loaded.
+ '''
+
+ if self._callbacks_loaded:
+ return
+
+ stdout_callback_loaded = False
+ if self._stdout_callback is None:
+ self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
+
+ if isinstance(self._stdout_callback, CallbackBase):
+ stdout_callback_loaded = True
+ elif isinstance(self._stdout_callback, string_types):
+ if self._stdout_callback not in callback_loader:
+ raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
+ else:
+ self._stdout_callback = callback_loader.get(self._stdout_callback)
+ self._stdout_callback.set_options()
+ stdout_callback_loaded = True
+ else:
+ raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
+
+ # get all configured loadable callbacks (adjacent, builtin)
+ callback_list = list(callback_loader.all(class_only=True))
+
+ # add enabled callbacks that refer to collections, which might not appear in normal listing
+ for c in C.CALLBACKS_ENABLED:
+ # load all, as collection ones might be using short/redirected names and not an FQCN
+ plugin = callback_loader.get(c, class_only=True)
+
+ # TODO: check if this skip is redundant, loader should handle bad file/plugin cases already
+ if plugin:
+ # avoid duplicates and incorrect entries that are possible due to collections
+ if plugin not in callback_list:
+ callback_list.append(plugin)
+ else:
+ display.warning("Skipping callback plugin '%s', unable to load" % c)
+
+ # for each callback in the list see if we should add it to 'active callbacks' used in the play
+ for callback_plugin in callback_list:
+
+ callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '')
+ callback_needs_enabled = getattr(callback_plugin, 'CALLBACK_NEEDS_ENABLED', getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False))
+
+ # try to get the collection world name first
+ cnames = getattr(callback_plugin, '_redirected_names', [])
+ if cnames:
+ # store the name the plugin was loaded as, as that's what we'll need to compare to the configured callback list later
+ callback_name = cnames[0]
+ else:
+ # fallback to 'old loader name'
+ (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
+
+ display.vvvvv("Attempting to use '%s' callback." % (callback_name))
+ if callback_type == 'stdout':
+ # we only allow one callback of type 'stdout' to be loaded,
+ if callback_name != self._stdout_callback or stdout_callback_loaded:
+ display.vv("Skipping callback '%s', as we already have a stdout callback." % (callback_name))
+ continue
+ stdout_callback_loaded = True
+ elif callback_name == 'tree' and self._run_tree:
+ # TODO: remove special case for tree, which is an adhoc cli option --tree
+ pass
+ elif not self._run_additional_callbacks or (callback_needs_enabled and (
+ # only run if not adhoc, or if adhoc was specifically configured to run; also check the enabled list
+ C.CALLBACKS_ENABLED is None or callback_name not in C.CALLBACKS_ENABLED)):
+ # plugins shipped with ansible 2.x should require enabling; older or non-shipped plugins should load automatically
+ continue
+
+ try:
+ callback_obj = callback_plugin()
+ # guard against a bad plugin not returning an object; only needed because we do a class_only load and bypass loader checks.
+ # Really a bug in the plugin itself, which we ignore as callback errors are not supposed to be fatal.
+ if callback_obj:
+ # skip initializing if we already did the work for the same plugin (even with diff names)
+ if callback_obj not in self._callback_plugins:
+ callback_obj.set_options()
+ self._callback_plugins.append(callback_obj)
+ else:
+ display.vv("Skipping callback '%s', already loaded as '%s'." % (callback_plugin, callback_name))
+ else:
+ display.warning("Skipping callback '%s', as it does not create a valid plugin instance." % callback_name)
+ continue
+ except Exception as e:
+ display.warning("Skipping callback '%s', unable to load due to: %s" % (callback_name, to_native(e)))
+ continue
+
+ self._callbacks_loaded = True
+
+ def run(self, play):
+ '''
+ Iterates over the roles/tasks in a play, using the given (or default)
+ strategy for queueing tasks. The default is the linear strategy, which
+ operates like classic Ansible by keeping all hosts in lock-step with
+ a given task (meaning no hosts move on to the next task until all hosts
+ are done with the current task).
+ '''
+
+ if not self._callbacks_loaded:
+ self.load_callbacks()
+
+ all_vars = self._variable_manager.get_vars(play=play)
+ templar = Templar(loader=self._loader, variables=all_vars)
+ warn_if_reserved(all_vars, templar.environment.globals.keys())
+
+ new_play = play.copy()
+ new_play.post_validate(templar)
+ new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers
+
+ self.hostvars = HostVars(
+ inventory=self._inventory,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ )
+
+ play_context = PlayContext(new_play, self.passwords, self._connection_lockfile.fileno())
+ if (self._stdout_callback and
+ hasattr(self._stdout_callback, 'set_play_context')):
+ self._stdout_callback.set_play_context(play_context)
+
+ for callback_plugin in self._callback_plugins:
+ if hasattr(callback_plugin, 'set_play_context'):
+ callback_plugin.set_play_context(play_context)
+
+ self.send_callback('v2_playbook_on_play_start', new_play)
+
+ # build the iterator
+ iterator = PlayIterator(
+ inventory=self._inventory,
+ play=new_play,
+ play_context=play_context,
+ variable_manager=self._variable_manager,
+ all_vars=all_vars,
+ start_at_done=self._start_at_done,
+ )
+
+ # adjust the number of workers to the configured forks or the size of the batch, whichever is lower
+ self._initialize_processes(min(self._forks, iterator.batch_size))
+
+ # load the specified strategy (or the default linear one)
+ strategy = strategy_loader.get(new_play.strategy, self)
+ if strategy is None:
+ raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
+
+ # Because the TQM may survive multiple play runs, we start by marking
+ # any hosts as failed in the iterator here which may have been marked
+ # as failed in previous runs. Then we clear the internal list of failed
+ # hosts so we know what failed this round.
+ for host_name in self._failed_hosts.keys():
+ host = self._inventory.get_host(host_name)
+ iterator.mark_host_failed(host)
+ for host_name in self._unreachable_hosts.keys():
+ iterator._play._removed_hosts.append(host_name)
+
+ self.clear_failed_hosts()
+
+ # during initialization, the PlayContext will clear the start_at_task
+ # field to signal that a matching task was found, so check that here
+ # and remember it so we don't try to skip tasks on future plays
+ if context.CLIARGS.get('start_at_task') is not None and play_context.start_at_task is None:
+ self._start_at_done = True
+
+ # and run the play using the strategy and cleanup on way out
+ try:
+ play_return = strategy.run(iterator, play_context)
+ finally:
+ strategy.cleanup()
+ self._cleanup_processes()
+
+ # now re-save the hosts that failed from the iterator to our internal list
+ for host_name in iterator.get_failed_hosts():
+ self._failed_hosts[host_name] = True
+
+ if iterator.end_play:
+ raise AnsibleEndPlay(play_return)
+
+ return play_return
+
+ def cleanup(self):
+ display.debug("RUNNING CLEANUP")
+ self.terminate()
+ self._final_q.close()
+ self._cleanup_processes()
+ # We no longer flush on every write in ``Display.display``
+ # just ensure we've flushed during cleanup
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ def _cleanup_processes(self):
+ if hasattr(self, '_workers'):
+ for attempts_remaining in range(C.WORKER_SHUTDOWN_POLL_COUNT - 1, -1, -1):
+ if not any(worker_prc and worker_prc.is_alive() for worker_prc in self._workers):
+ break
+
+ if attempts_remaining:
+ time.sleep(C.WORKER_SHUTDOWN_POLL_DELAY)
+ else:
+ display.warning('One or more worker processes are still running and will be terminated.')
+
+ for worker_prc in self._workers:
+ if worker_prc and worker_prc.is_alive():
+ try:
+ worker_prc.terminate()
+ except AttributeError:
+ pass
+
+ def clear_failed_hosts(self):
+ self._failed_hosts = dict()
+
+ def get_inventory(self):
+ return self._inventory
+
+ def get_variable_manager(self):
+ return self._variable_manager
+
+ def get_loader(self):
+ return self._loader
+
+ def get_workers(self):
+ return self._workers[:]
+
+ def terminate(self):
+ self._terminated = True
+
+ def has_dead_workers(self):
+
+ # [<WorkerProcess(WorkerProcess-2, stopped[SIGKILL])>,
+ # <WorkerProcess(WorkerProcess-2, stopped[SIGTERM])>]
+
+ defunct = False
+ for x in self._workers:
+ if getattr(x, 'exitcode', None):
+ defunct = True
+ return defunct
+
+ @lock_decorator(attr='_callback_lock')
+ def send_callback(self, method_name, *args, **kwargs):
+ for callback_plugin in [self._stdout_callback] + self._callback_plugins:
+ # a plugin that set self.disabled to True will not be called
+ # see osx_say.py example for such a plugin
+ if getattr(callback_plugin, 'disabled', False):
+ continue
+
+ # a plugin can opt in to implicit tasks (such as meta). It does this
+ # by declaring self.wants_implicit_tasks = True.
+ wants_implicit_tasks = getattr(callback_plugin, 'wants_implicit_tasks', False)
+
+ # try to find v2 method, fallback to v1 method, ignore callback if no method found
+ methods = []
+ for possible in [method_name, 'v2_on_any']:
+ gotit = getattr(callback_plugin, possible, None)
+ if gotit is None:
+ gotit = getattr(callback_plugin, possible.removeprefix('v2_'), None)
+ if gotit is not None:
+ methods.append(gotit)
+
+ # send clean copies
+ new_args = []
+
+ # If we end up being given an implicit task, we'll set this flag in
+ # the loop below. If the plugin doesn't care about those, then we
+ # check and continue to the next iteration of the outer loop.
+ is_implicit_task = False
+
+ for arg in args:
+ # FIXME: add play/task cleaners
+ if isinstance(arg, TaskResult):
+ new_args.append(arg.clean_copy())
+ # elif isinstance(arg, Play):
+ # elif isinstance(arg, Task):
+ else:
+ new_args.append(arg)
+
+ if isinstance(arg, Task) and arg.implicit:
+ is_implicit_task = True
+
+ if is_implicit_task and not wants_implicit_tasks:
+ continue
+
+ for method in methods:
+ try:
+ method(*new_args, **kwargs)
+ except Exception as e:
+ # TODO: add config toggle to make this fatal or not?
+ display.warning(u"Failure using method (%s) in callback plugin (%s): %s" % (to_text(method_name), to_text(callback_plugin), to_text(e)))
+ from traceback import format_tb
+ from sys import exc_info
+ display.vvv('Callback Exception: \n' + ' '.join(format_tb(exc_info()[2])))
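+
+
+class _ExampleCallback(CallbackBase):
+    # A minimal sketch of a plugin send_callback() can dispatch to; the class
+    # is illustrative only and is not registered with the loader. Methods are
+    # resolved by exact name first, then 'v2_on_any', then the v1 name with
+    # the 'v2_' prefix stripped.
+    wants_implicit_tasks = False  # opt out of implicit (meta) tasks; the default
+
+    def v2_runner_on_ok(self, result):
+        self._display.display('ok: %s' % result.task_name)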
diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py
new file mode 100644
index 0000000..543b860
--- /dev/null
+++ b/lib/ansible/executor/task_result.py
@@ -0,0 +1,154 @@
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.parsing.dataloader import DataLoader
+from ansible.vars.clean import module_response_deepcopy, strip_internal_keys
+
+_IGNORE = ('failed', 'skipped')
+_PRESERVE = ('attempts', 'changed', 'retries')
+_SUB_PRESERVE = {'_ansible_delegated_vars': ('ansible_host', 'ansible_port', 'ansible_user', 'ansible_connection')}
+
+# stuff callbacks need
+CLEAN_EXCEPTIONS = (
+ '_ansible_verbose_always', # for debug and other actions, to always expand data (pretty jsonification)
+ '_ansible_item_label', # to know actual 'item' variable
+ '_ansible_no_log', # just in case we didn't clean up well enough, DON'T LOG
+ '_ansible_verbose_override', # controls display of ansible_facts; gathering would be very noisy with -v otherwise
+)
+
+
+class TaskResult:
+ '''
+ This class is responsible for interpreting the resulting data
+ from an executed task, and provides helper methods for determining
+ the result of a given task.
+ '''
+
+ def __init__(self, host, task, return_data, task_fields=None):
+ self._host = host
+ self._task = task
+
+ if isinstance(return_data, dict):
+ self._result = return_data.copy()
+ else:
+ self._result = DataLoader().load(return_data)
+
+ if task_fields is None:
+ self._task_fields = dict()
+ else:
+ self._task_fields = task_fields
+
+ @property
+ def task_name(self):
+ return self._task_fields.get('name', None) or self._task.get_name()
+
+ def is_changed(self):
+ return self._check_key('changed')
+
+ def is_skipped(self):
+ # loop results
+ if 'results' in self._result:
+ results = self._result['results']
+ # Loop tasks are only considered skipped if all items were skipped.
+ # some squashed results (eg, yum) are not dicts and can't be skipped individually
+ if results and all(isinstance(res, dict) and res.get('skipped', False) for res in results):
+ return True
+
+ # regular tasks and squashed non-dict results
+ return self._result.get('skipped', False)
+
+ def is_failed(self):
+ if 'failed_when_result' in self._result or \
+ 'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:
+ return self._check_key('failed_when_result')
+ else:
+ return self._check_key('failed')
+
+ def is_unreachable(self):
+ return self._check_key('unreachable')
+
+ def needs_debugger(self, globally_enabled=False):
+ _debugger = self._task_fields.get('debugger')
+ _ignore_errors = C.TASK_DEBUGGER_IGNORE_ERRORS and self._task_fields.get('ignore_errors')
+
+ ret = False
+ if globally_enabled and ((self.is_failed() and not _ignore_errors) or self.is_unreachable()):
+ ret = True
+
+ if _debugger in ('always',):
+ ret = True
+ elif _debugger in ('never',):
+ ret = False
+ elif _debugger in ('on_failed',) and self.is_failed() and not _ignore_errors:
+ ret = True
+ elif _debugger in ('on_unreachable',) and self.is_unreachable():
+ ret = True
+ elif _debugger in ('on_skipped',) and self.is_skipped():
+ ret = True
+
+ return ret
+
+ def _check_key(self, key):
+ '''get a specific key from the result or its items'''
+
+ if isinstance(self._result, dict) and key in self._result:
+ return self._result.get(key, False)
+ else:
+ flag = False
+ for res in self._result.get('results', []):
+ if isinstance(res, dict):
+ flag |= res.get(key, False)
+ return flag
+
+ def clean_copy(self):
+
+ ''' returns a 'clean' TaskResult object '''
+
+ # FIXME: clean task_fields, _task and _host copies
+ result = TaskResult(self._host, self._task, {}, self._task_fields)
+
+ # statuses are already reflected in the event type
+ if result._task and result._task.action in C._ACTION_DEBUG:
+ # debug is verbose by default to display vars, no need to add invocation
+ ignore = _IGNORE + ('invocation',)
+ else:
+ ignore = _IGNORE
+
+ subset = {}
+ # preserve subset for later
+ for sub in _SUB_PRESERVE:
+ if sub in self._result:
+ subset[sub] = {}
+ for key in _SUB_PRESERVE[sub]:
+ if key in self._result[sub]:
+ subset[sub][key] = self._result[sub][key]
+
+ if (isinstance(self._task.no_log, bool) and self._task.no_log) or self._result.get('_ansible_no_log', False):
+ x = {"censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}
+
+ # preserve full
+ for preserve in _PRESERVE:
+ if preserve in self._result:
+ x[preserve] = self._result[preserve]
+
+ result._result = x
+ elif self._result:
+ result._result = module_response_deepcopy(self._result)
+
+ # actually remove
+ for remove_key in ignore:
+ if remove_key in result._result:
+ del result._result[remove_key]
+
+ # remove almost ALL internal keys, keep ones relevant to callback
+ strip_internal_keys(result._result, exceptions=CLEAN_EXCEPTIONS)
+
+ # keep subset
+ result._result.update(subset)
+
+ return result
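+
+
+def _example_task_result_flags():
+    # A minimal sketch of how _check_key() aggregates loop results: the task
+    # counts as changed if any item in 'results' reports changed. Host and
+    # task are stubbed as None, which __init__ permits for this illustration.
+    tr = TaskResult(None, None, {'results': [{'changed': False}, {'changed': True}]})
+    assert tr.is_changed()
+    assert not tr.is_skipped()  # not every item was skipped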
diff --git a/lib/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py
new file mode 100644
index 0000000..d3b9035
--- /dev/null
+++ b/lib/ansible/galaxy/__init__.py
@@ -0,0 +1,72 @@
+########################################################################
+#
+# (C) 2015, Brian Coca <bcoca@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+''' This manages remote shared Ansible objects, mainly roles'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import ansible.constants as C
+from ansible import context
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common.yaml import yaml_load
+
+# default_readme_template
+# default_meta_template
+
+
+def get_collections_galaxy_meta_info():
+ meta_path = os.path.join(os.path.dirname(__file__), 'data', 'collections_galaxy_meta.yml')
+ with open(to_bytes(meta_path, errors='surrogate_or_strict'), 'rb') as galaxy_obj:
+ return yaml_load(galaxy_obj)
+
+
+class Galaxy(object):
+ ''' Keeps global galaxy info '''
+
+ def __init__(self):
+ # TODO: eventually remove this as it contains a mishmash of properties that aren't really global
+
+ # roles_path needs to be a list and will be by default
+ roles_path = context.CLIARGS.get('roles_path', C.DEFAULT_ROLES_PATH)
+ # cli option handling is responsible for splitting roles_path
+ self.roles_paths = roles_path
+
+ self.roles = {}
+
+ # load data path for resource usage
+ this_dir, this_filename = os.path.split(__file__)
+ type_path = context.CLIARGS.get('role_type', 'default')
+ if type_path == 'default':
+ type_path = os.path.join(type_path, context.CLIARGS.get('type'))
+
+ self.DATA_PATH = os.path.join(this_dir, 'data', type_path)
+
+ @property
+ def default_role_skeleton_path(self):
+ return self.DATA_PATH
+
+ def add_role(self, role):
+ self.roles[role.name] = role
+
+ def remove_role(self, role_name):
+ del self.roles[role_name]
diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py
new file mode 100644
index 0000000..8dea804
--- /dev/null
+++ b/lib/ansible/galaxy/api.py
@@ -0,0 +1,913 @@
+# (C) 2013, James Cammarata <jcammarata@ansible.com>
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import datetime
+import functools
+import hashlib
+import json
+import os
+import stat
+import tarfile
+import time
+import threading
+
+from urllib.error import HTTPError
+from urllib.parse import quote as urlquote, urlencode, urlparse, parse_qs, urljoin
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils.api import retry_with_delays_and_condition
+from ansible.module_utils.api import generate_jittered_backoff
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.urls import open_url, prepare_multipart
+from ansible.utils.display import Display
+from ansible.utils.hashing import secure_hash_s
+from ansible.utils.path import makedirs_safe
+
+display = Display()
+_CACHE_LOCK = threading.Lock()
+COLLECTION_PAGE_SIZE = 100
+RETRY_HTTP_ERROR_CODES = [ # TODO: Allow user-configuration
+ 429, # Too Many Requests
+ 520, # Galaxy rate limit error code (Cloudflare unknown error)
+]
+
+
+def cache_lock(func):
+ def wrapped(*args, **kwargs):
+ with _CACHE_LOCK:
+ return func(*args, **kwargs)
+
+ return wrapped
+
+
+def is_rate_limit_exception(exception):
+ # Note: cloud.redhat.com masks rate limit errors with 403 (Forbidden) error codes.
+ # Since 403 could reflect the actual problem (such as an expired token), we should
+ # not retry by default.
+ return isinstance(exception, GalaxyError) and exception.http_code in RETRY_HTTP_ERROR_CODES
+
+
+def g_connect(versions):
+ """
+ Wrapper to lazily initialize connection info to Galaxy and verify the API versions required are available on the
+ endpoint.
+
+ :param versions: A list of API versions that the function supports.
+ """
+ def decorator(method):
+ def wrapped(self, *args, **kwargs):
+ if not self._available_api_versions:
+ display.vvvv("Initial connection to galaxy_server: %s" % self.api_server)
+
+ # Determine the type of Galaxy server we are talking to. First try it unauthenticated then with Bearer
+ # auth for Automation Hub.
+ n_url = self.api_server
+ error_context_msg = 'Error when finding available api versions from %s (%s)' % (self.name, n_url)
+
+ if self.api_server == 'https://galaxy.ansible.com' or self.api_server == 'https://galaxy.ansible.com/':
+ n_url = 'https://galaxy.ansible.com/api/'
+
+ try:
+ data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True)
+ except (AnsibleError, GalaxyError, ValueError, KeyError) as err:
+ # Either the URL doesn't exist or there was some other error; or the URL exists
+ # but isn't a galaxy API root (not JSON, no 'available_versions'), so try appending '/api/'
+ if n_url.endswith('/api') or n_url.endswith('/api/'):
+ raise
+
+ # Let exceptions here bubble up but raise the original if this returns a 404 (/api/ wasn't found).
+ n_url = _urljoin(n_url, '/api/')
+ try:
+ data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg, cache=True)
+ except GalaxyError as new_err:
+ if new_err.http_code == 404:
+ raise err
+ raise
+
+ if 'available_versions' not in data:
+ raise AnsibleError("Tried to find galaxy API root at %s but no 'available_versions' are available "
+ "on %s" % (n_url, self.api_server))
+
+ # Update api_server to point to the "real" API root, which in this case could be
+ # the configured url with '/api/' appended.
+ self.api_server = n_url
+
+ # Default to only supporting v1; if only v1 is returned, we also assume that v2 is
+ # available even though it isn't returned in the available_versions dict.
+ available_versions = data.get('available_versions', {u'v1': u'v1/'})
+ if list(available_versions.keys()) == [u'v1']:
+ available_versions[u'v2'] = u'v2/'
+
+ self._available_api_versions = available_versions
+ display.vvvv("Found API version '%s' with Galaxy server %s (%s)"
+ % (', '.join(available_versions.keys()), self.name, self.api_server))
+
+ # Verify that the API versions the function works with are available on the server specified.
+ available_versions = set(self._available_api_versions.keys())
+ common_versions = set(versions).intersection(available_versions)
+ if not common_versions:
+ raise AnsibleError("Galaxy action %s requires API versions '%s' but only '%s' are available on %s %s"
+ % (method.__name__, ", ".join(versions), ", ".join(available_versions),
+ self.name, self.api_server))
+
+ return method(self, *args, **kwargs)
+ return wrapped
+ return decorator
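+
+# A minimal usage sketch (illustrative only): any GalaxyAPI method decorated
+# like this runs only after version discovery has populated
+# self._available_api_versions and confirmed a commonly supported version.
+#
+#   @g_connect(['v2', 'v3'])
+#   def my_api_call(self, ...):
+#       ...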
+
+
+def get_cache_id(url):
+ """ Gets the cache ID for the URL specified. """
+ url_info = urlparse(url)
+
+ port = None
+ try:
+ port = url_info.port
+ except ValueError:
+ pass # While the URL is probably invalid, let the caller figure that out when using it
+
+ # Cannot use netloc because it could contain credentials if the server specified had them in there.
+ return '%s:%s' % (url_info.hostname, port or '')
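+
+
+def _example_cache_id():
+    # Small sketch of get_cache_id() behavior: the scheme, path and any
+    # embedded credentials are dropped; only hostname and port identify a
+    # server in the cache.
+    assert get_cache_id('https://user:pass@galaxy.example.com:8443/api/') == 'galaxy.example.com:8443'
+    assert get_cache_id('https://galaxy.ansible.com/api/') == 'galaxy.ansible.com:'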
+
+
+@cache_lock
+def _load_cache(b_cache_path):
+ """ Loads the cache file requested if possible. The file must not be world writable. """
+ cache_version = 1
+
+ if not os.path.isfile(b_cache_path):
+ display.vvvv("Creating Galaxy API response cache file at '%s'" % to_text(b_cache_path))
+ with open(b_cache_path, 'w'):
+ os.chmod(b_cache_path, 0o600)
+
+ cache_mode = os.stat(b_cache_path).st_mode
+ if cache_mode & stat.S_IWOTH:
+ display.warning("Galaxy cache has world writable access (%s), ignoring it as a cache source."
+ % to_text(b_cache_path))
+ return
+
+ with open(b_cache_path, mode='rb') as fd:
+ json_val = to_text(fd.read(), errors='surrogate_or_strict')
+
+ try:
+ cache = json.loads(json_val)
+ except ValueError:
+ cache = None
+
+ if not isinstance(cache, dict) or cache.get('version', None) != cache_version:
+ display.vvvv("Galaxy cache file at '%s' has an invalid version, clearing" % to_text(b_cache_path))
+ cache = {'version': cache_version}
+
+ # Set the cache after we've cleared the existing entries
+ with open(b_cache_path, mode='wb') as fd:
+ fd.write(to_bytes(json.dumps(cache), errors='surrogate_or_strict'))
+
+ return cache
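+
+
+def _example_cache_shape():
+    # A minimal sketch of the on-disk cache structure that _load_cache() reads
+    # and _call_galaxy() fills: the outer key comes from get_cache_id() and
+    # the inner key is the request path. The values below are placeholders.
+    cache = {'version': 1}
+    server_cache = cache.setdefault(get_cache_id('https://galaxy.ansible.com/api/'), {})
+    server_cache['/api/v2/collections/'] = {
+        'expires': '2030-01-01T00:00:00Z',  # '%Y-%m-%dT%H:%M:%SZ'
+        'paginated': False,
+        'results': {},
+    }
+    return cache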
+
+
+def _urljoin(*args):
+ return '/'.join(to_native(a, errors='surrogate_or_strict').strip('/') for a in args + ('',) if a)
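+
+
+def _example_urljoin():
+    # Small sketch of _urljoin() semantics: each segment is stripped of
+    # leading/trailing slashes and falsy segments are dropped; callers append
+    # any trailing slash themselves.
+    assert _urljoin('https://galaxy.ansible.com/', '/api/', 'v2') == 'https://galaxy.ansible.com/api/v2'
+    assert _urljoin('a', None, 'b') == 'a/b'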
+
+
+class GalaxyError(AnsibleError):
+ """ Error for bad Galaxy server responses. """
+
+ def __init__(self, http_error, message):
+ super(GalaxyError, self).__init__(message)
+ self.http_code = http_error.code
+ self.url = http_error.geturl()
+
+ try:
+ http_msg = to_text(http_error.read())
+ err_info = json.loads(http_msg)
+ except (AttributeError, ValueError):
+ err_info = {}
+
+ url_split = self.url.split('/')
+ if 'v2' in url_split:
+ galaxy_msg = err_info.get('message', http_error.reason)
+ code = err_info.get('code', 'Unknown')
+ full_error_msg = u"%s (HTTP Code: %d, Message: %s Code: %s)" % (message, self.http_code, galaxy_msg, code)
+ elif 'v3' in url_split:
+ errors = err_info.get('errors', [])
+ if not errors:
+ errors = [{}] # Defaults are set below, we just need to make sure 1 error is present.
+
+ message_lines = []
+ for error in errors:
+ error_msg = error.get('detail') or error.get('title') or http_error.reason
+ error_code = error.get('code') or 'Unknown'
+ message_line = u"(HTTP Code: %d, Message: %s Code: %s)" % (self.http_code, error_msg, error_code)
+ message_lines.append(message_line)
+
+ full_error_msg = "%s %s" % (message, ', '.join(message_lines))
+ else:
+ # v1 and unknown API endpoints
+ galaxy_msg = err_info.get('default', http_error.reason)
+ full_error_msg = u"%s (HTTP Code: %d, Message: %s)" % (message, self.http_code, galaxy_msg)
+
+ self.message = to_native(full_error_msg)
+
+
+# Keep the raw string results for the date. It's too complex to parse as a datetime object and the various APIs return
+# them in different formats.
+CollectionMetadata = collections.namedtuple('CollectionMetadata', ['namespace', 'name', 'created_str', 'modified_str'])
+
+
+class CollectionVersionMetadata:
+
+ def __init__(self, namespace, name, version, download_url, artifact_sha256, dependencies, signatures_url, signatures):
+ """
+ Contains common information about a collection version on a Galaxy server,
+ smoothing over API differences and defining standard metadata for a collection.
+
+ :param namespace: The namespace name.
+ :param name: The collection name.
+ :param version: The version that the metadata refers to.
+ :param download_url: The URL to download the collection.
+ :param artifact_sha256: The SHA256 of the collection artifact for later verification.
+ :param dependencies: A dict of dependencies of the collection.
+ :param signatures_url: The URL where the signatures for this collection version can be retrieved.
+ :param signatures: The list of signatures found at the signatures_url.
+ """
+ self.namespace = namespace
+ self.name = name
+ self.version = version
+ self.download_url = download_url
+ self.artifact_sha256 = artifact_sha256
+ self.dependencies = dependencies
+ self.signatures_url = signatures_url
+ self.signatures = signatures
+
+
+@functools.total_ordering
+class GalaxyAPI:
+ """ This class is meant to be used as a API client for an Ansible Galaxy server """
+
+ def __init__(
+ self, galaxy, name, url,
+ username=None, password=None, token=None, validate_certs=True,
+ available_api_versions=None,
+ clear_response_cache=False, no_cache=True,
+ priority=float('inf'),
+ timeout=60,
+ ):
+ self.galaxy = galaxy
+ self.name = name
+ self.username = username
+ self.password = password
+ self.token = token
+ self.api_server = url
+ self.validate_certs = validate_certs
+ self.timeout = timeout
+ self._available_api_versions = available_api_versions or {}
+ self._priority = priority
+ self._server_timeout = timeout
+
+ b_cache_dir = to_bytes(C.GALAXY_CACHE_DIR, errors='surrogate_or_strict')
+ makedirs_safe(b_cache_dir, mode=0o700)
+ self._b_cache_path = os.path.join(b_cache_dir, b'api.json')
+
+ if clear_response_cache:
+ with _CACHE_LOCK:
+ if os.path.exists(self._b_cache_path):
+ display.vvvv("Clearing cache file (%s)" % to_text(self._b_cache_path))
+ os.remove(self._b_cache_path)
+
+ self._cache = None
+ if not no_cache:
+ self._cache = _load_cache(self._b_cache_path)
+
+ display.debug('Validate TLS certificates for %s: %s' % (self.api_server, self.validate_certs))
+
+ def __str__(self):
+ # type: (GalaxyAPI) -> str
+ """Render GalaxyAPI as a native string representation."""
+ return to_native(self.name)
+
+ def __unicode__(self):
+ # type: (GalaxyAPI) -> str
+ """Render GalaxyAPI as a unicode/text string representation."""
+ return to_text(self.name)
+
+ def __repr__(self):
+ # type: (GalaxyAPI) -> str
+ """Render GalaxyAPI as an inspectable string representation."""
+ return (
+ '<{instance!s} "{name!s}" @ {url!s} with priority {priority!s}>'.
+ format(
+ instance=self, name=self.name,
+ priority=self._priority, url=self.api_server,
+ )
+ )
+
+ def __lt__(self, other_galaxy_api):
+ # type: (GalaxyAPI, GalaxyAPI) -> bool
+ """Return whether the instance priority is higher than other."""
+ if not isinstance(other_galaxy_api, self.__class__):
+ return NotImplemented
+
+ return (
+ self._priority > other_galaxy_api._priority or
+ self.name < other_galaxy_api.name
+ )
+
+ @property # type: ignore[misc] # https://github.com/python/mypy/issues/1362
+ @g_connect(['v1', 'v2', 'v3'])
+ def available_api_versions(self):
+ # Calling g_connect will populate self._available_api_versions
+ return self._available_api_versions
+
+ @retry_with_delays_and_condition(
+ backoff_iterator=generate_jittered_backoff(retries=6, delay_base=2, delay_threshold=40),
+ should_retry_error=is_rate_limit_exception
+ )
+ def _call_galaxy(self, url, args=None, headers=None, method=None, auth_required=False, error_context_msg=None,
+ cache=False, cache_key=None):
+ url_info = urlparse(url)
+ cache_id = get_cache_id(url)
+ if not cache_key:
+ cache_key = url_info.path
+ query = parse_qs(url_info.query)
+ if cache and self._cache:
+ server_cache = self._cache.setdefault(cache_id, {})
+ iso_datetime_format = '%Y-%m-%dT%H:%M:%SZ'
+
+ valid = False
+ if cache_key in server_cache:
+ expires = datetime.datetime.strptime(server_cache[cache_key]['expires'], iso_datetime_format)
+ valid = datetime.datetime.utcnow() < expires
+
+ is_paginated_url = 'page' in query or 'offset' in query
+ if valid and not is_paginated_url:
+ # Got a hit on the cache and we aren't getting a paginated response
+ path_cache = server_cache[cache_key]
+ if path_cache.get('paginated'):
+ if '/v3/' in cache_key:
+ res = {'links': {'next': None}}
+ else:
+ res = {'next': None}
+
+ # Technically some v3 paginated APIs return results under 'data', but the caller
+ # checks the keys for this, so always returning the cached entries under 'results' is fine.
+ res['results'] = []
+ for result in path_cache['results']:
+ res['results'].append(result)
+
+ else:
+ res = path_cache['results']
+
+ return res
+
+ elif not is_paginated_url:
+ # The cache entry had expired or does not exist, start a new blank entry to be filled later.
+ expires = datetime.datetime.utcnow()
+ expires += datetime.timedelta(days=1)
+ server_cache[cache_key] = {
+ 'expires': expires.strftime(iso_datetime_format),
+ 'paginated': False,
+ }
+
+ headers = headers or {}
+ self._add_auth_token(headers, url, required=auth_required)
+
+ try:
+ display.vvvv("Calling Galaxy at %s" % url)
+ resp = open_url(to_native(url), data=args, validate_certs=self.validate_certs, headers=headers,
+ method=method, timeout=self._server_timeout, http_agent=user_agent(), follow_redirects='safe')
+ except HTTPError as e:
+ raise GalaxyError(e, error_context_msg)
+ except Exception as e:
+ raise AnsibleError("Unknown error when attempting to call Galaxy at '%s': %s" % (url, to_native(e)))
+
+ resp_data = to_text(resp.read(), errors='surrogate_or_strict')
+ try:
+ data = json.loads(resp_data)
+ except ValueError:
+ raise AnsibleError("Failed to parse Galaxy response from '%s' as JSON:\n%s"
+ % (resp.url, to_native(resp_data)))
+
+ if cache and self._cache:
+ path_cache = self._cache[cache_id][cache_key]
+
+ # v3 can return data or results for paginated results. Scan the result so we can determine what to cache.
+ paginated_key = None
+ for key in ['data', 'results']:
+ if key in data:
+ paginated_key = key
+ break
+
+ if paginated_key:
+ path_cache['paginated'] = True
+ results = path_cache.setdefault('results', [])
+ for result in data[paginated_key]:
+ results.append(result)
+
+ else:
+ path_cache['results'] = data
+
+ return data
+
+ def _add_auth_token(self, headers, url, token_type=None, required=False):
+ # Don't add the auth token if one is already present
+ if 'Authorization' in headers:
+ return
+
+ if not self.token and required:
+ raise AnsibleError("No access token or username set. A token can be set with --api-key "
+ "or at {0}.".format(to_native(C.GALAXY_TOKEN_PATH)))
+
+ if self.token:
+ headers.update(self.token.headers())
+
+ @cache_lock
+ def _set_cache(self):
+ with open(self._b_cache_path, mode='wb') as fd:
+ fd.write(to_bytes(json.dumps(self._cache), errors='surrogate_or_strict'))
+
+ @g_connect(['v1'])
+ def authenticate(self, github_token):
+ """
+ Retrieve an authentication token
+ """
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "tokens") + '/'
+ args = urlencode({"github_token": github_token})
+
+ try:
+ resp = open_url(url, data=args, validate_certs=self.validate_certs, method="POST", http_agent=user_agent(), timeout=self._server_timeout)
+ except HTTPError as e:
+ raise GalaxyError(e, 'Attempting to authenticate to galaxy')
+ except Exception as e:
+ raise AnsibleError('Unable to authenticate to galaxy: %s' % to_native(e), orig_exc=e)
+
+ data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
+ return data
+
+ @g_connect(['v1'])
+ def create_import_task(self, github_user, github_repo, reference=None, role_name=None):
+ """
+ Post an import request
+ """
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports") + '/'
+ args = {
+ "github_user": github_user,
+ "github_repo": github_repo,
+ "github_reference": reference if reference else ""
+ }
+ if role_name:
+ args['alternate_role_name'] = role_name
+ elif github_repo.startswith('ansible-role'):
+ args['alternate_role_name'] = github_repo[len('ansible-role') + 1:]
+ data = self._call_galaxy(url, args=urlencode(args), method="POST")
+ if data.get('results', None):
+ return data['results']
+ return data
+
+ @g_connect(['v1'])
+ def get_import_task(self, task_id=None, github_user=None, github_repo=None):
+ """
+ Check the status of an import task.
+ """
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports")
+ if task_id is not None:
+ url = "%s?id=%d" % (url, task_id)
+ elif github_user is not None and github_repo is not None:
+ url = "%s?github_user=%s&github_repo=%s" % (url, github_user, github_repo)
+ else:
+ raise AnsibleError("Expected task_id or github_user and github_repo")
+
+ data = self._call_galaxy(url)
+ return data['results']
+
+ @g_connect(['v1'])
+ def lookup_role_by_name(self, role_name, notify=True):
+ """
+ Find a role by name.
+ """
+ role_name = to_text(urlquote(to_bytes(role_name)))
+
+ try:
+ parts = role_name.split(".")
+ user_name = ".".join(parts[0:-1])
+ role_name = parts[-1]
+ if notify:
+ display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
+ except Exception:
+ raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
+
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles",
+ "?owner__username=%s&name=%s" % (user_name, role_name))
+ data = self._call_galaxy(url)
+ if len(data["results"]) != 0:
+ return data["results"][0]
+ return None
+
+ @g_connect(['v1'])
+ def fetch_role_related(self, related, role_id):
+ """
+ Fetch the list of related items for the given role.
+ The url comes from the 'related' field of the role.
+ """
+
+ results = []
+ try:
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", role_id, related,
+ "?page_size=50")
+ data = self._call_galaxy(url)
+ results = data['results']
+ done = (data.get('next_link', None) is None)
+
+ # https://github.com/ansible/ansible/issues/64355
+ # api_server contains part of the API path but next_link includes the /api part so strip it out.
+ url_info = urlparse(self.api_server)
+ base_url = "%s://%s/" % (url_info.scheme, url_info.netloc)
+
+ while not done:
+ url = _urljoin(base_url, data['next_link'])
+ data = self._call_galaxy(url)
+ results += data['results']
+ done = (data.get('next_link', None) is None)
+ except Exception as e:
+ display.warning("Unable to retrieve role (id=%s) data (%s), but this is not fatal so we continue: %s"
+ % (role_id, related, to_text(e)))
+ return results
+
+ @g_connect(['v1'])
+ def get_list(self, what):
+ """
+ Fetch the list of items specified.
+ """
+ try:
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], what, "?page_size")
+ data = self._call_galaxy(url)
+ if "results" in data:
+ results = data['results']
+ else:
+ results = data
+ done = True
+ if "next" in data:
+ done = (data.get('next_link', None) is None)
+ while not done:
+ url = _urljoin(self.api_server, data['next_link'])
+ data = self._call_galaxy(url)
+ results += data['results']
+ done = (data.get('next_link', None) is None)
+ return results
+ except Exception as error:
+ raise AnsibleError("Failed to download the %s list: %s" % (what, to_native(error)))
+
+ @g_connect(['v1'])
+ def search_roles(self, search, **kwargs):
+
+ search_url = _urljoin(self.api_server, self.available_api_versions['v1'], "search", "roles", "?")
+
+ if search:
+ search_url += '&autocomplete=' + to_text(urlquote(to_bytes(search)))
+
+ tags = kwargs.get('tags', None)
+ platforms = kwargs.get('platforms', None)
+ page_size = kwargs.get('page_size', None)
+ author = kwargs.get('author', None)
+
+ if tags and isinstance(tags, string_types):
+ tags = tags.split(',')
+ search_url += '&tags_autocomplete=' + '+'.join(tags)
+
+ if platforms and isinstance(platforms, string_types):
+ platforms = platforms.split(',')
+ search_url += '&platforms_autocomplete=' + '+'.join(platforms)
+
+ if page_size:
+ search_url += '&page_size=%s' % page_size
+
+ if author:
+ search_url += '&username_autocomplete=%s' % author
+
+ data = self._call_galaxy(search_url)
+ return data
+
+ @g_connect(['v1'])
+ def add_secret(self, source, github_user, github_repo, secret):
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets") + '/'
+ args = urlencode({
+ "source": source,
+ "github_user": github_user,
+ "github_repo": github_repo,
+ "secret": secret
+ })
+ data = self._call_galaxy(url, args=args, method="POST")
+ return data
+
+ @g_connect(['v1'])
+ def list_secrets(self):
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets")
+ data = self._call_galaxy(url, auth_required=True)
+ return data
+
+ @g_connect(['v1'])
+ def remove_secret(self, secret_id):
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets", secret_id) + '/'
+ data = self._call_galaxy(url, auth_required=True, method='DELETE')
+ return data
+
+ @g_connect(['v1'])
+ def delete_role(self, github_user, github_repo):
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "removerole",
+ "?github_user=%s&github_repo=%s" % (github_user, github_repo))
+ data = self._call_galaxy(url, auth_required=True, method='DELETE')
+ return data
+
+ # Collection APIs #
+
+ @g_connect(['v2', 'v3'])
+ def publish_collection(self, collection_path):
+ """
+ Publishes a collection to a Galaxy server and returns the import task URI.
+
+ :param collection_path: The path to the collection tarball to publish.
+ :return: The import task URI that contains the import results.
+ """
+ display.display("Publishing collection artifact '%s' to %s %s" % (collection_path, self.name, self.api_server))
+
+ b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
+ if not os.path.exists(b_collection_path):
+ raise AnsibleError("The collection path specified '%s' does not exist." % to_native(collection_path))
+ elif not tarfile.is_tarfile(b_collection_path):
+ raise AnsibleError("The collection path specified '%s' is not a tarball, use 'ansible-galaxy collection "
+ "build' to create a proper release artifact." % to_native(collection_path))
+
+ with open(b_collection_path, 'rb') as collection_tar:
+ sha256 = secure_hash_s(collection_tar.read(), hash_func=hashlib.sha256)
+
+ content_type, b_form_data = prepare_multipart(
+ {
+ 'sha256': sha256,
+ 'file': {
+ 'filename': b_collection_path,
+ 'mime_type': 'application/octet-stream',
+ },
+ }
+ )
+
+ headers = {
+ 'Content-type': content_type,
+ 'Content-length': len(b_form_data),
+ }
+
+ if 'v3' in self.available_api_versions:
+ n_url = _urljoin(self.api_server, self.available_api_versions['v3'], 'artifacts', 'collections') + '/'
+ else:
+ n_url = _urljoin(self.api_server, self.available_api_versions['v2'], 'collections') + '/'
+
+ resp = self._call_galaxy(n_url, args=b_form_data, headers=headers, method='POST', auth_required=True,
+ error_context_msg='Error when publishing collection to %s (%s)'
+ % (self.name, self.api_server))
+
+ return resp['task']
+
+ @g_connect(['v2', 'v3'])
+ def wait_import_task(self, task_id, timeout=0):
+ """
+ Waits until the import process on the Galaxy server has completed or the timeout is reached.
+
+ :param task_id: The id of the import task to wait for. This can be parsed out of the return
+ value for GalaxyAPI.publish_collection.
+ :param timeout: The timeout in seconds, 0 is no timeout.
+ """
+ state = 'waiting'
+ data = None
+
+ # Construct the appropriate URL per version
+ if 'v3' in self.available_api_versions:
+ full_url = _urljoin(self.api_server, self.available_api_versions['v3'],
+ 'imports/collections', task_id, '/')
+ else:
+ full_url = _urljoin(self.api_server, self.available_api_versions['v2'],
+ 'collection-imports', task_id, '/')
+
+ display.display("Waiting until Galaxy import task %s has completed" % full_url)
+ start = time.time()
+ wait = 2
+
+ while timeout == 0 or (time.time() - start) < timeout:
+ try:
+ data = self._call_galaxy(full_url, method='GET', auth_required=True,
+ error_context_msg='Error when getting import task results at %s' % full_url)
+ except GalaxyError as e:
+ if e.http_code != 404:
+ raise
+ # The import job may not have started, and as such, the task url may not yet exist
+ display.vvv('Galaxy import process has not started, wait %s seconds before trying again' % wait)
+ time.sleep(wait)
+ continue
+
+ state = data.get('state', 'waiting')
+
+ if data.get('finished_at', None):
+ break
+
+ display.vvv('Galaxy import process has a status of %s, wait %d seconds before trying again'
+ % (state, wait))
+ time.sleep(wait)
+
+            # Poor man's exponential backoff so we don't flood the Galaxy API; capped at 30 seconds.
+ wait = min(30, wait * 1.5)
+ if state == 'waiting':
+ raise AnsibleError("Timeout while waiting for the Galaxy import process to finish, check progress at '%s'"
+ % to_native(full_url))
+
+ for message in data.get('messages', []):
+ level = message['level']
+ if level.lower() == 'error':
+ display.error("Galaxy import error message: %s" % message['message'])
+ elif level.lower() == 'warning':
+ display.warning("Galaxy import warning message: %s" % message['message'])
+ else:
+ display.vvv("Galaxy import message: %s - %s" % (level, message['message']))
+
+ if state == 'failed':
+ code = to_native(data['error'].get('code', 'UNKNOWN'))
+ description = to_native(
+ data['error'].get('description', "Unknown error, see %s for more details" % full_url))
+ raise AnsibleError("Galaxy import process failed: %s (Code: %s)" % (description, code))
+
+ @g_connect(['v2', 'v3'])
+ def get_collection_metadata(self, namespace, name):
+ """
+ Gets the collection information from the Galaxy server about a specific Collection.
+
+ :param namespace: The collection namespace.
+ :param name: The collection name.
+        :return: CollectionMetadata about the collection.
+ """
+ if 'v3' in self.available_api_versions:
+ api_path = self.available_api_versions['v3']
+ field_map = [
+ ('created_str', 'created_at'),
+ ('modified_str', 'updated_at'),
+ ]
+ else:
+ api_path = self.available_api_versions['v2']
+ field_map = [
+ ('created_str', 'created'),
+ ('modified_str', 'modified'),
+ ]
+
+ info_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, '/')
+ error_context_msg = 'Error when getting the collection info for %s.%s from %s (%s)' \
+ % (namespace, name, self.name, self.api_server)
+ data = self._call_galaxy(info_url, error_context_msg=error_context_msg)
+
+ metadata = {}
+ for name, api_field in field_map:
+ metadata[name] = data.get(api_field, None)
+
+ return CollectionMetadata(namespace, name, **metadata)
+
+ @g_connect(['v2', 'v3'])
+ def get_collection_version_metadata(self, namespace, name, version):
+ """
+ Gets the collection information from the Galaxy server about a specific Collection version.
+
+ :param namespace: The collection namespace.
+ :param name: The collection name.
+ :param version: Version of the collection to get the information for.
+ :return: CollectionVersionMetadata about the collection at the version requested.
+ """
+ api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2'))
+ url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/']
+
+ n_collection_url = _urljoin(*url_paths)
+ error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \
+ % (namespace, name, version, self.name, self.api_server)
+ data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True)
+ self._set_cache()
+
+ signatures = data.get('signatures') or []
+
+ return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'],
+ data['download_url'], data['artifact']['sha256'],
+ data['metadata']['dependencies'], data['href'], signatures)
+
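+    # Illustrative (hypothetical) payload shape consumed by get_collection_version_metadata above:
+    #   {"namespace": {"name": "ns"}, "collection": {"name": "coll"}, "version": "1.0.0",
+    #    "download_url": "https://...", "artifact": {"sha256": "..."},
+    #    "metadata": {"dependencies": {}}, "href": "...", "signatures": []}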
+ @g_connect(['v2', 'v3'])
+ def get_collection_versions(self, namespace, name):
+ """
+ Gets a list of available versions for a collection on a Galaxy server.
+
+ :param namespace: The collection namespace.
+ :param name: The collection name.
+ :return: A list of versions that are available.
+ """
+ relative_link = False
+ if 'v3' in self.available_api_versions:
+ api_path = self.available_api_versions['v3']
+ pagination_path = ['links', 'next']
+            relative_link = True  # AH pagination results are relative and not an absolute URI.
+ else:
+ api_path = self.available_api_versions['v2']
+ pagination_path = ['next']
+
+ page_size_name = 'limit' if 'v3' in self.available_api_versions else 'page_size'
+ versions_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, 'versions', '/?%s=%d' % (page_size_name, COLLECTION_PAGE_SIZE))
+ versions_url_info = urlparse(versions_url)
+ cache_key = versions_url_info.path
+
+ # We should only rely on the cache if the collection has not changed. This may slow things down but it ensures
+ # we are not waiting a day before finding any new collections that have been published.
+ if self._cache:
+ server_cache = self._cache.setdefault(get_cache_id(versions_url), {})
+ modified_cache = server_cache.setdefault('modified', {})
+
+ try:
+ modified_date = self.get_collection_metadata(namespace, name).modified_str
+ except GalaxyError as err:
+ if err.http_code != 404:
+ raise
+ # No collection found, return an empty list to keep things consistent with the various APIs
+ return []
+
+ cached_modified_date = modified_cache.get('%s.%s' % (namespace, name), None)
+ if cached_modified_date != modified_date:
+ modified_cache['%s.%s' % (namespace, name)] = modified_date
+ if versions_url_info.path in server_cache:
+ del server_cache[cache_key]
+
+ self._set_cache()
+
+ error_context_msg = 'Error when getting available collection versions for %s.%s from %s (%s)' \
+ % (namespace, name, self.name, self.api_server)
+
+ try:
+ data = self._call_galaxy(versions_url, error_context_msg=error_context_msg, cache=True, cache_key=cache_key)
+ except GalaxyError as err:
+ if err.http_code != 404:
+ raise
+            # v3 doesn't raise a 404 so we need to mimic the empty response from APIs that do.
+ return []
+
+ if 'data' in data:
+ # v3 automation-hub is the only known API that uses `data`
+ # since v3 pulp_ansible does not, we cannot rely on version
+ # to indicate which key to use
+ results_key = 'data'
+ else:
+ results_key = 'results'
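+        # Illustrative (hypothetical) response shapes handled above:
+        #   v2:            {"results": [{"version": "1.0.0"}, ...], "next": "https://.../?page=2"}
+        #   v3 (AH-style): {"data": [{"version": "1.0.0"}, ...], "links": {"next": "/api/.../versions/?limit=100&offset=100"}}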
+
+ versions = []
+ while True:
+ versions += [v['version'] for v in data[results_key]]
+
+ next_link = data
+ for path in pagination_path:
+ next_link = next_link.get(path, {})
+
+ if not next_link:
+ break
+ elif relative_link:
+ # TODO: This assumes the pagination result is relative to the root server. Will need to be verified
+ # with someone who knows the AH API.
+
+ # Remove the query string from the versions_url to use the next_link's query
+ versions_url = urljoin(versions_url, urlparse(versions_url).path)
+ next_link = versions_url.replace(versions_url_info.path, next_link)
+
+ data = self._call_galaxy(to_native(next_link, errors='surrogate_or_strict'),
+ error_context_msg=error_context_msg, cache=True, cache_key=cache_key)
+ self._set_cache()
+
+ return versions
+
+ @g_connect(['v2', 'v3'])
+ def get_collection_signatures(self, namespace, name, version):
+ """
+ Gets the collection signatures from the Galaxy server about a specific Collection version.
+
+ :param namespace: The collection namespace.
+ :param name: The collection name.
+ :param version: Version of the collection to get the information for.
+ :return: A list of signature strings.
+ """
+ api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2'))
+ url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/']
+
+ n_collection_url = _urljoin(*url_paths)
+ error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \
+ % (namespace, name, version, self.name, self.api_server)
+ data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True)
+ self._set_cache()
+
+ try:
+ signatures = data["signatures"]
+ except KeyError:
+            # This is noisy since it is used by the dep resolver, so require more verbosity than for other Galaxy calls
+ display.vvvvvv(f"Server {self.api_server} has not signed {namespace}.{name}:{version}")
+ return []
+ else:
+ return [signature_info["signature"] for signature_info in signatures]
diff --git a/lib/ansible/galaxy/collection/__init__.py b/lib/ansible/galaxy/collection/__init__.py
new file mode 100644
index 0000000..7a144c0
--- /dev/null
+++ b/lib/ansible/galaxy/collection/__init__.py
@@ -0,0 +1,1836 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Installed collections management package."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import fnmatch
+import functools
+import json
+import os
+import queue
+import re
+import shutil
+import stat
+import sys
+import tarfile
+import tempfile
+import textwrap
+import threading
+import time
+import typing as t
+
+from collections import namedtuple
+from contextlib import contextmanager
+from dataclasses import dataclass, fields as dc_fields
+from hashlib import sha256
+from io import BytesIO
+from importlib.metadata import distribution
+from itertools import chain
+
+try:
+ from packaging.requirements import Requirement as PkgReq
+except ImportError:
+ class PkgReq: # type: ignore[no-redef]
+ pass
+
+ HAS_PACKAGING = False
+else:
+ HAS_PACKAGING = True
+
+try:
+ from distlib.manifest import Manifest # type: ignore[import]
+ from distlib import DistlibException # type: ignore[import]
+except ImportError:
+ HAS_DISTLIB = False
+else:
+ HAS_DISTLIB = True
+
+if t.TYPE_CHECKING:
+ from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+ )
+
+ ManifestKeysType = t.Literal[
+ 'collection_info', 'file_manifest_file', 'format',
+ ]
+ FileMetaKeysType = t.Literal[
+ 'name',
+ 'ftype',
+ 'chksum_type',
+ 'chksum_sha256',
+ 'format',
+ ]
+ CollectionInfoKeysType = t.Literal[
+ # collection meta:
+ 'namespace', 'name', 'version',
+ 'authors', 'readme',
+ 'tags', 'description',
+ 'license', 'license_file',
+ 'dependencies',
+ 'repository', 'documentation',
+ 'homepage', 'issues',
+
+ # files meta:
+ FileMetaKeysType,
+ ]
+ ManifestValueType = t.Dict[CollectionInfoKeysType, t.Union[int, str, t.List[str], t.Dict[str, str], None]]
+ CollectionManifestType = t.Dict[ManifestKeysType, ManifestValueType]
+ FileManifestEntryType = t.Dict[FileMetaKeysType, t.Union[str, int, None]]
+ FilesManifestType = t.Dict[t.Literal['files', 'format'], t.Union[t.List[FileManifestEntryType], int]]
+
+import ansible.constants as C
+from ansible.errors import AnsibleError
+from ansible.galaxy.api import GalaxyAPI
+from ansible.galaxy.collection.concrete_artifact_manager import (
+ _consume_file,
+ _download_file,
+ _get_json_from_installed_dir,
+ _get_meta_from_src_dir,
+ _tarfile_extract,
+)
+from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
+from ansible.galaxy.collection.gpg import (
+ run_gpg_verify,
+ parse_gpg_errors,
+ get_signature_from_source,
+ GPG_ERROR_MAP,
+)
+try:
+ from ansible.galaxy.dependency_resolution import (
+ build_collection_dependency_resolver,
+ )
+ from ansible.galaxy.dependency_resolution.errors import (
+ CollectionDependencyResolutionImpossible,
+ CollectionDependencyInconsistentCandidate,
+ )
+ from ansible.galaxy.dependency_resolution.providers import (
+ RESOLVELIB_VERSION,
+ RESOLVELIB_LOWERBOUND,
+ RESOLVELIB_UPPERBOUND,
+ )
+except ImportError:
+ HAS_RESOLVELIB = False
+else:
+ HAS_RESOLVELIB = True
+
+from ansible.galaxy.dependency_resolution.dataclasses import (
+ Candidate, Requirement, _is_installed_collection_dir,
+)
+from ansible.galaxy.dependency_resolution.versioning import meets_requirements
+from ansible.plugins.loader import get_all_plugin_loaders
+from ansible.module_utils.six import raise_from
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.common.yaml import yaml_dump
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.display import Display
+from ansible.utils.hashing import secure_hash, secure_hash_s
+from ansible.utils.sentinel import Sentinel
+
+
+display = Display()
+
+MANIFEST_FORMAT = 1
+MANIFEST_FILENAME = 'MANIFEST.json'
+
+ModifiedContent = namedtuple('ModifiedContent', ['filename', 'expected', 'installed'])
+
+SIGNATURE_COUNT_RE = r"^(?P<strict>\+)?(?:(?P<count>\d+)|(?P<all>all))$"
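+# Accepted forms for the required signature count (per the regex above): a decimal count such
+# as "1" or the keyword "all"; a leading "+" (e.g. "+1", "+all") makes verification strict,
+# failing outright when no signature verifies successfully.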
+
+
+@dataclass
+class ManifestControl:
+ directives: list[str] = None
+ omit_default_directives: bool = False
+
+ def __post_init__(self):
+ # Allow a dict representing this dataclass to be splatted directly.
+        # Requires attrs to have a default value, so anything with a default
+        # of None is swapped for its (potentially mutable) default.
+ for field in dc_fields(self):
+ if getattr(self, field.name) is None:
+ super().__setattr__(field.name, field.type())
+
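+# e.g. a `manifest` mapping from galaxy.yml can be splatted directly: ManifestControl(**manifest_dict);
+# any field left as None is replaced in __post_init__ by calling its annotated type
+# (producing an empty list for `directives`).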
+
+class CollectionSignatureError(Exception):
+ def __init__(self, reasons=None, stdout=None, rc=None, ignore=False):
+ self.reasons = reasons
+ self.stdout = stdout
+ self.rc = rc
+ self.ignore = ignore
+
+ self._reason_wrapper = None
+
+ def _report_unexpected(self, collection_name):
+ return (
+ f"Unexpected error for '{collection_name}': "
+ f"GnuPG signature verification failed with the return code {self.rc} and output {self.stdout}"
+ )
+
+ def _report_expected(self, collection_name):
+ header = f"Signature verification failed for '{collection_name}' (return code {self.rc}):"
+ return header + self._format_reasons()
+
+ def _format_reasons(self):
+ if self._reason_wrapper is None:
+ self._reason_wrapper = textwrap.TextWrapper(
+ initial_indent=" * ", # 6 chars
+ subsequent_indent=" ", # 6 chars
+ )
+
+ wrapped_reasons = [
+ '\n'.join(self._reason_wrapper.wrap(reason))
+ for reason in self.reasons
+ ]
+
+ return '\n' + '\n'.join(wrapped_reasons)
+
+ def report(self, collection_name):
+ if self.reasons:
+ return self._report_expected(collection_name)
+
+ return self._report_unexpected(collection_name)
+
+
+# FUTURE: expose actual verify result details for a collection on this object, maybe reimplement as dataclass on py3.8+
+class CollectionVerifyResult:
+ def __init__(self, collection_name): # type: (str) -> None
+ self.collection_name = collection_name # type: str
+ self.success = True # type: bool
+
+
+def verify_local_collection(local_collection, remote_collection, artifacts_manager):
+ # type: (Candidate, t.Optional[Candidate], ConcreteArtifactsManager) -> CollectionVerifyResult
+ """Verify integrity of the locally installed collection.
+
+ :param local_collection: Collection being checked.
+ :param remote_collection: Upstream collection (optional, if None, only verify local artifact)
+ :param artifacts_manager: Artifacts manager.
+ :return: a collection verify result object.
+ """
+ result = CollectionVerifyResult(local_collection.fqcn)
+
+ b_collection_path = to_bytes(local_collection.src, errors='surrogate_or_strict')
+
+ display.display("Verifying '{coll!s}'.".format(coll=local_collection))
+ display.display(
+ u"Installed collection found at '{path!s}'".
+ format(path=to_text(local_collection.src)),
+ )
+
+ modified_content = [] # type: list[ModifiedContent]
+
+ verify_local_only = remote_collection is None
+
+ # partial away the local FS detail so we can just ask generically during validation
+ get_json_from_validation_source = functools.partial(_get_json_from_installed_dir, b_collection_path)
+ get_hash_from_validation_source = functools.partial(_get_file_hash, b_collection_path)
+
+ if not verify_local_only:
+ # Compare installed version versus requirement version
+ if local_collection.ver != remote_collection.ver:
+ err = (
+ "{local_fqcn!s} has the version '{local_ver!s}' but "
+ "is being compared to '{remote_ver!s}'".format(
+ local_fqcn=local_collection.fqcn,
+ local_ver=local_collection.ver,
+ remote_ver=remote_collection.ver,
+ )
+ )
+ display.display(err)
+ result.success = False
+ return result
+
+ manifest_file = os.path.join(to_text(b_collection_path, errors='surrogate_or_strict'), MANIFEST_FILENAME)
+ signatures = list(local_collection.signatures)
+ if verify_local_only and local_collection.source_info is not None:
+ signatures = [info["signature"] for info in local_collection.source_info["signatures"]] + signatures
+ elif not verify_local_only and remote_collection.signatures:
+ signatures = list(remote_collection.signatures) + signatures
+
+ keyring_configured = artifacts_manager.keyring is not None
+ if not keyring_configured and signatures:
+ display.warning(
+ "The GnuPG keyring used for collection signature "
+ "verification was not configured but signatures were "
+ "provided by the Galaxy server. "
+ "Configure a keyring for ansible-galaxy to verify "
+ "the origin of the collection. "
+ "Skipping signature verification."
+ )
+ elif keyring_configured:
+ if not verify_file_signatures(
+ local_collection.fqcn,
+ manifest_file,
+ signatures,
+ artifacts_manager.keyring,
+ artifacts_manager.required_successful_signature_count,
+ artifacts_manager.ignore_signature_errors,
+ ):
+ result.success = False
+ return result
+ display.vvvv(f"GnuPG signature verification succeeded, verifying contents of {local_collection}")
+
+ if verify_local_only:
+ # since we're not downloading this, just seed it with the value from disk
+ manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME)
+ elif keyring_configured and remote_collection.signatures:
+ manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME)
+ else:
+ # fetch remote
+ b_temp_tar_path = ( # NOTE: AnsibleError is raised on URLError
+ artifacts_manager.get_artifact_path
+ if remote_collection.is_concrete_artifact
+ else artifacts_manager.get_galaxy_artifact_path
+ )(remote_collection)
+
+ display.vvv(
+ u"Remote collection cached as '{path!s}'".format(path=to_text(b_temp_tar_path))
+ )
+
+ # partial away the tarball details so we can just ask generically during validation
+ get_json_from_validation_source = functools.partial(_get_json_from_tar_file, b_temp_tar_path)
+ get_hash_from_validation_source = functools.partial(_get_tar_file_hash, b_temp_tar_path)
+
+ # Verify the downloaded manifest hash matches the installed copy before verifying the file manifest
+ manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME)
+ _verify_file_hash(b_collection_path, MANIFEST_FILENAME, manifest_hash, modified_content)
+
+ display.display('MANIFEST.json hash: {manifest_hash}'.format(manifest_hash=manifest_hash))
+
+ manifest = get_json_from_validation_source(MANIFEST_FILENAME)
+
+ # Use the manifest to verify the file manifest checksum
+ file_manifest_data = manifest['file_manifest_file']
+ file_manifest_filename = file_manifest_data['name']
+ expected_hash = file_manifest_data['chksum_%s' % file_manifest_data['chksum_type']]
+
+ # Verify the file manifest before using it to verify individual files
+ _verify_file_hash(b_collection_path, file_manifest_filename, expected_hash, modified_content)
+ file_manifest = get_json_from_validation_source(file_manifest_filename)
+
+ collection_dirs = set()
+ collection_files = {
+ os.path.join(b_collection_path, b'MANIFEST.json'),
+ os.path.join(b_collection_path, b'FILES.json'),
+ }
+
+ # Use the file manifest to verify individual file checksums
+ for manifest_data in file_manifest['files']:
+ name = manifest_data['name']
+
+ if manifest_data['ftype'] == 'file':
+ collection_files.add(
+ os.path.join(b_collection_path, to_bytes(name, errors='surrogate_or_strict'))
+ )
+ expected_hash = manifest_data['chksum_%s' % manifest_data['chksum_type']]
+ _verify_file_hash(b_collection_path, name, expected_hash, modified_content)
+
+ if manifest_data['ftype'] == 'dir':
+ collection_dirs.add(
+ os.path.join(b_collection_path, to_bytes(name, errors='surrogate_or_strict'))
+ )
+
+ # Find any paths not in the FILES.json
+ for root, dirs, files in os.walk(b_collection_path):
+ for name in files:
+ full_path = os.path.join(root, name)
+ path = to_text(full_path[len(b_collection_path) + 1::], errors='surrogate_or_strict')
+
+ if full_path not in collection_files:
+ modified_content.append(
+ ModifiedContent(filename=path, expected='the file does not exist', installed='the file exists')
+ )
+ for name in dirs:
+ full_path = os.path.join(root, name)
+ path = to_text(full_path[len(b_collection_path) + 1::], errors='surrogate_or_strict')
+
+ if full_path not in collection_dirs:
+ modified_content.append(
+ ModifiedContent(filename=path, expected='the directory does not exist', installed='the directory exists')
+ )
+
+ if modified_content:
+ result.success = False
+ display.display(
+ 'Collection {fqcn!s} contains modified content '
+ 'in the following files:'.
+ format(fqcn=to_text(local_collection.fqcn)),
+ )
+ for content_change in modified_content:
+ display.display(' %s' % content_change.filename)
+ display.v(" Expected: %s\n Found: %s" % (content_change.expected, content_change.installed))
+ else:
+ what = "are internally consistent with its manifest" if verify_local_only else "match the remote collection"
+ display.display(
+ "Successfully verified that checksums for '{coll!s}' {what!s}.".
+ format(coll=local_collection, what=what),
+ )
+
+ return result
+
+
+def verify_file_signatures(fqcn, manifest_file, detached_signatures, keyring, required_successful_count, ignore_signature_errors):
+ # type: (str, str, list[str], str, str, list[str]) -> bool
+ successful = 0
+ error_messages = []
+
+ signature_count_requirements = re.match(SIGNATURE_COUNT_RE, required_successful_count).groupdict()
+
+ strict = signature_count_requirements['strict'] or False
+ require_all = signature_count_requirements['all']
+ require_count = signature_count_requirements['count']
+ if require_count is not None:
+ require_count = int(require_count)
+
+ for signature in detached_signatures:
+ signature = to_text(signature, errors='surrogate_or_strict')
+ try:
+ verify_file_signature(manifest_file, signature, keyring, ignore_signature_errors)
+ except CollectionSignatureError as error:
+ if error.ignore:
+ # Do not include ignored errors in either the failed or successful count
+ continue
+ error_messages.append(error.report(fqcn))
+ else:
+ successful += 1
+
+ if require_all:
+ continue
+
+ if successful == require_count:
+ break
+
+ if strict and not successful:
+ verified = False
+ display.display(f"Signature verification failed for '{fqcn}': no successful signatures")
+ elif require_all:
+ verified = not error_messages
+ if not verified:
+ display.display(f"Signature verification failed for '{fqcn}': some signatures failed")
+ else:
+ verified = not detached_signatures or require_count == successful
+ if not verified:
+ display.display(f"Signature verification failed for '{fqcn}': fewer successful signatures than required")
+
+ if not verified:
+ for msg in error_messages:
+ display.vvvv(msg)
+
+ return verified
+
+
+def verify_file_signature(manifest_file, detached_signature, keyring, ignore_signature_errors):
+ # type: (str, str, str, list[str]) -> None
+ """Run the gpg command and parse any errors. Raises CollectionSignatureError on failure."""
+ gpg_result, gpg_verification_rc = run_gpg_verify(manifest_file, detached_signature, keyring, display)
+
+ if gpg_result:
+ errors = parse_gpg_errors(gpg_result)
+ try:
+ error = next(errors)
+ except StopIteration:
+ pass
+ else:
+ reasons = []
+ ignored_reasons = 0
+
+ for error in chain([error], errors):
+ # Get error status (dict key) from the class (dict value)
+ status_code = list(GPG_ERROR_MAP.keys())[list(GPG_ERROR_MAP.values()).index(error.__class__)]
+ if status_code in ignore_signature_errors:
+ ignored_reasons += 1
+ reasons.append(error.get_gpg_error_description())
+
+ ignore = len(reasons) == ignored_reasons
+ raise CollectionSignatureError(reasons=set(reasons), stdout=gpg_result, rc=gpg_verification_rc, ignore=ignore)
+
+ if gpg_verification_rc:
+ raise CollectionSignatureError(stdout=gpg_result, rc=gpg_verification_rc)
+
+ # No errors and rc is 0, verify was successful
+ return None
+
+
+def build_collection(u_collection_path, u_output_path, force):
+ # type: (str, str, bool) -> str
+ """Creates the Ansible collection artifact in a .tar.gz file.
+
+ :param u_collection_path: The path to the collection to build. This should be the directory that contains the
+ galaxy.yml file.
+ :param u_output_path: The path to create the collection build artifact. This should be a directory.
+ :param force: Whether to overwrite an existing collection build artifact or fail.
+ :return: The path to the collection build artifact.
+ """
+ b_collection_path = to_bytes(u_collection_path, errors='surrogate_or_strict')
+ try:
+ collection_meta = _get_meta_from_src_dir(b_collection_path)
+ except LookupError as lookup_err:
+ raise_from(AnsibleError(to_native(lookup_err)), lookup_err)
+
+ collection_manifest = _build_manifest(**collection_meta)
+ file_manifest = _build_files_manifest(
+ b_collection_path,
+ collection_meta['namespace'], # type: ignore[arg-type]
+ collection_meta['name'], # type: ignore[arg-type]
+ collection_meta['build_ignore'], # type: ignore[arg-type]
+ collection_meta['manifest'], # type: ignore[arg-type]
+ )
+
+ artifact_tarball_file_name = '{ns!s}-{name!s}-{ver!s}.tar.gz'.format(
+ name=collection_meta['name'],
+ ns=collection_meta['namespace'],
+ ver=collection_meta['version'],
+ )
+ b_collection_output = os.path.join(
+ to_bytes(u_output_path),
+ to_bytes(artifact_tarball_file_name, errors='surrogate_or_strict'),
+ )
+
+ if os.path.exists(b_collection_output):
+ if os.path.isdir(b_collection_output):
+ raise AnsibleError("The output collection artifact '%s' already exists, "
+ "but is a directory - aborting" % to_native(b_collection_output))
+ elif not force:
+ raise AnsibleError("The file '%s' already exists. You can use --force to re-create "
+ "the collection artifact." % to_native(b_collection_output))
+
+ collection_output = _build_collection_tar(b_collection_path, b_collection_output, collection_manifest, file_manifest)
+ return collection_output
+
+
+def download_collections(
+ collections, # type: t.Iterable[Requirement]
+ output_path, # type: str
+ apis, # type: t.Iterable[GalaxyAPI]
+ no_deps, # type: bool
+ allow_pre_release, # type: bool
+ artifacts_manager, # type: ConcreteArtifactsManager
+): # type: (...) -> None
+ """Download Ansible collections as their tarball from a Galaxy server to the path specified and creates a requirements
+ file of the downloaded requirements to be used for an install.
+
+ :param collections: The collections to download, should be a list of tuples with (name, requirement, Galaxy Server).
+ :param output_path: The path to download the collections to.
+ :param apis: A list of GalaxyAPIs to query when search for a collection.
+ :param validate_certs: Whether to validate the certificate if downloading a tarball from a non-Galaxy host.
+ :param no_deps: Ignore any collection dependencies and only download the base requirements.
+ :param allow_pre_release: Do not ignore pre-release versions when selecting the latest.
+ """
+ with _display_progress("Process download dependency map"):
+ dep_map = _resolve_depenency_map(
+ set(collections),
+ galaxy_apis=apis,
+ preferred_candidates=None,
+ concrete_artifacts_manager=artifacts_manager,
+ no_deps=no_deps,
+ allow_pre_release=allow_pre_release,
+ upgrade=False,
+ # Avoid overhead getting signatures since they are not currently applicable to downloaded collections
+ include_signatures=False,
+ offline=False,
+ )
+
+ b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
+
+ requirements = []
+ with _display_progress(
+ "Starting collection download process to '{path!s}'".
+ format(path=output_path),
+ ):
+ for fqcn, concrete_coll_pin in dep_map.copy().items(): # FIXME: move into the provider
+ if concrete_coll_pin.is_virtual:
+ display.display(
+ 'Virtual collection {coll!s} is not downloadable'.
+ format(coll=to_text(concrete_coll_pin)),
+ )
+ continue
+
+ display.display(
+ u"Downloading collection '{coll!s}' to '{path!s}'".
+ format(coll=to_text(concrete_coll_pin), path=to_text(b_output_path)),
+ )
+
+ b_src_path = (
+ artifacts_manager.get_artifact_path
+ if concrete_coll_pin.is_concrete_artifact
+ else artifacts_manager.get_galaxy_artifact_path
+ )(concrete_coll_pin)
+
+ b_dest_path = os.path.join(
+ b_output_path,
+ os.path.basename(b_src_path),
+ )
+
+ if concrete_coll_pin.is_dir:
+ b_dest_path = to_bytes(
+ build_collection(
+ to_text(b_src_path, errors='surrogate_or_strict'),
+ to_text(output_path, errors='surrogate_or_strict'),
+ force=True,
+ ),
+ errors='surrogate_or_strict',
+ )
+ else:
+ shutil.copy(to_native(b_src_path), to_native(b_dest_path))
+
+ display.display(
+ "Collection '{coll!s}' was downloaded successfully".
+ format(coll=concrete_coll_pin),
+ )
+ requirements.append({
+ # FIXME: Consider using a more specific upgraded format
+ # FIXME: having FQCN in the name field, with src field
+ # FIXME: pointing to the file path, and explicitly set
+ # FIXME: type. If version and name are set, it'd
+ # FIXME: perform validation against the actual metadata
+ # FIXME: in the artifact src points at.
+ 'name': to_native(os.path.basename(b_dest_path)),
+ 'version': concrete_coll_pin.ver,
+ })
+
+ requirements_path = os.path.join(output_path, 'requirements.yml')
+ b_requirements_path = to_bytes(
+ requirements_path, errors='surrogate_or_strict',
+ )
+ display.display(
+ u'Writing requirements.yml file of downloaded collections '
+ "to '{path!s}'".format(path=to_text(requirements_path)),
+ )
+ yaml_bytes = to_bytes(
+ yaml_dump({'collections': requirements}),
+ errors='surrogate_or_strict',
+ )
+ with open(b_requirements_path, mode='wb') as req_fd:
+ req_fd.write(yaml_bytes)
+
+
+def publish_collection(collection_path, api, wait, timeout):
+ """Publish an Ansible collection tarball into an Ansible Galaxy server.
+
+ :param collection_path: The path to the collection tarball to publish.
+ :param api: A GalaxyAPI to publish the collection to.
+ :param wait: Whether to wait until the import process is complete.
+ :param timeout: The time in seconds to wait for the import process to finish, 0 is indefinite.
+ """
+ import_uri = api.publish_collection(collection_path)
+
+ if wait:
+ # Galaxy returns a url fragment which differs between v2 and v3. The second to last entry is
+ # always the task_id, though.
+ # v2: {"task": "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"}
+ # v3: {"task": "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"}
+ task_id = None
+ for path_segment in reversed(import_uri.split('/')):
+ if path_segment:
+ task_id = path_segment
+ break
+
+ if not task_id:
+ raise AnsibleError("Publishing the collection did not return valid task info. Cannot wait for task status. Returned task info: '%s'" % import_uri)
+
+ with _display_progress(
+ "Collection has been published to the Galaxy server "
+ "{api.name!s} {api.api_server!s}".format(api=api),
+ ):
+ api.wait_import_task(task_id, timeout)
+ display.display("Collection has been successfully published and imported to the Galaxy server %s %s"
+ % (api.name, api.api_server))
+ else:
+ display.display("Collection has been pushed to the Galaxy server %s %s, not waiting until import has "
+ "completed due to --no-wait being set. Import task results can be found at %s"
+ % (api.name, api.api_server, import_uri))
+
+
+def install_collections(
+ collections, # type: t.Iterable[Requirement]
+ output_path, # type: str
+ apis, # type: t.Iterable[GalaxyAPI]
+ ignore_errors, # type: bool
+ no_deps, # type: bool
+ force, # type: bool
+ force_deps, # type: bool
+ upgrade, # type: bool
+ allow_pre_release, # type: bool
+ artifacts_manager, # type: ConcreteArtifactsManager
+ disable_gpg_verify, # type: bool
+ offline, # type: bool
+): # type: (...) -> None
+ """Install Ansible collections to the path specified.
+
+ :param collections: The collections to install.
+ :param output_path: The path to install the collections to.
+ :param apis: A list of GalaxyAPIs to query when searching for a collection.
+    :param ignore_errors: Whether to ignore any errors when installing the collection.
+    :param no_deps: Ignore any collection dependencies and only install the base requirements.
+    :param force: Re-install a collection if it has already been installed.
+    :param force_deps: Re-install a collection as well as its dependencies if they have already been installed.
+    :param upgrade: Allow upgrading already installed collections to newer versions.
+    :param allow_pre_release: Do not ignore pre-release versions when selecting the latest.
+    :param artifacts_manager: Artifacts manager.
+    :param disable_gpg_verify: Whether to skip GnuPG signature verification for installed collections.
+    :param offline: Do not query distribution servers to resolve versions.
+ """
+ existing_collections = {
+ Requirement(coll.fqcn, coll.ver, coll.src, coll.type, None)
+ for coll in find_existing_collections(output_path, artifacts_manager)
+ }
+
+ unsatisfied_requirements = set(
+ chain.from_iterable(
+ (
+ Requirement.from_dir_path(sub_coll, artifacts_manager)
+ for sub_coll in (
+ artifacts_manager.
+ get_direct_collection_dependencies(install_req).
+ keys()
+ )
+ )
+ if install_req.is_subdirs else (install_req, )
+ for install_req in collections
+ ),
+ )
+ requested_requirements_names = {req.fqcn for req in unsatisfied_requirements}
+
+ # NOTE: Don't attempt to reevaluate already installed deps
+ # NOTE: unless `--force` or `--force-with-deps` is passed
+ unsatisfied_requirements -= set() if force or force_deps else {
+ req
+ for req in unsatisfied_requirements
+ for exs in existing_collections
+ if req.fqcn == exs.fqcn and meets_requirements(exs.ver, req.ver)
+ }
+
+ if not unsatisfied_requirements and not upgrade:
+ display.display(
+ 'Nothing to do. All requested collections are already '
+ 'installed. If you want to reinstall them, '
+ 'consider using `--force`.'
+ )
+ return
+
+ # FIXME: This probably needs to be improved to
+ # FIXME: properly match differing src/type.
+ existing_non_requested_collections = {
+ coll for coll in existing_collections
+ if coll.fqcn not in requested_requirements_names
+ }
+
+ preferred_requirements = (
+ [] if force_deps
+ else existing_non_requested_collections if force
+ else existing_collections
+ )
+ preferred_collections = {
+ # NOTE: No need to include signatures if the collection is already installed
+ Candidate(coll.fqcn, coll.ver, coll.src, coll.type, None)
+ for coll in preferred_requirements
+ }
+ with _display_progress("Process install dependency map"):
+ dependency_map = _resolve_depenency_map(
+ collections,
+ galaxy_apis=apis,
+ preferred_candidates=preferred_collections,
+ concrete_artifacts_manager=artifacts_manager,
+ no_deps=no_deps,
+ allow_pre_release=allow_pre_release,
+ upgrade=upgrade,
+ include_signatures=not disable_gpg_verify,
+ offline=offline,
+ )
+
+ keyring_exists = artifacts_manager.keyring is not None
+ with _display_progress("Starting collection install process"):
+ for fqcn, concrete_coll_pin in dependency_map.items():
+ if concrete_coll_pin.is_virtual:
+ display.vvvv(
+ "'{coll!s}' is virtual, skipping.".
+ format(coll=to_text(concrete_coll_pin)),
+ )
+ continue
+
+ if concrete_coll_pin in preferred_collections:
+ display.display(
+ "'{coll!s}' is already installed, skipping.".
+ format(coll=to_text(concrete_coll_pin)),
+ )
+ continue
+
+ if not disable_gpg_verify and concrete_coll_pin.signatures and not keyring_exists:
+ # Duplicate warning msgs are not displayed
+ display.warning(
+ "The GnuPG keyring used for collection signature "
+ "verification was not configured but signatures were "
+ "provided by the Galaxy server to verify authenticity. "
+ "Configure a keyring for ansible-galaxy to use "
+ "or disable signature verification. "
+ "Skipping signature verification."
+ )
+
+ try:
+ install(concrete_coll_pin, output_path, artifacts_manager)
+ except AnsibleError as err:
+ if ignore_errors:
+ display.warning(
+ 'Failed to install collection {coll!s} but skipping '
+ 'due to --ignore-errors being set. Error: {error!s}'.
+ format(
+ coll=to_text(concrete_coll_pin),
+ error=to_text(err),
+ )
+ )
+ else:
+ raise
+
+
+# NOTE: imported in ansible.cli.galaxy
+def validate_collection_name(name): # type: (str) -> str
+ """Validates the collection name as an input from the user or a requirements file fit the requirements.
+
+ :param name: The input name with optional range specifier split by ':'.
+ :return: The input value, required for argparse validation.
+ """
+ collection, dummy, dummy = name.partition(':')
+ if AnsibleCollectionRef.is_valid_collection_name(collection):
+ return name
+
+ raise AnsibleError("Invalid collection name '%s', "
+ "name must be in the format <namespace>.<collection>. \n"
+ "Please make sure namespace and collection name contains "
+ "characters from [a-zA-Z0-9_] only." % name)
+
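+# e.g. (hypothetical values) validate_collection_name('community.general:>=1.0.0') returns the
+# input unchanged, while validate_collection_name('not-valid!') raises AnsibleError.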
+
+# NOTE: imported in ansible.cli.galaxy
+def validate_collection_path(collection_path): # type: (str) -> str
+ """Ensure a given path ends with 'ansible_collections'
+
+ :param collection_path: The path that should end in 'ansible_collections'
+ :return: collection_path ending in 'ansible_collections' if it does not already.
+ """
+
+ if os.path.split(collection_path)[1] != 'ansible_collections':
+ return os.path.join(collection_path, 'ansible_collections')
+
+ return collection_path
+
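+# e.g. (illustrative path) validate_collection_path('/usr/share/ansible/collections') returns
+# '/usr/share/ansible/collections/ansible_collections'.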
+
+def verify_collections(
+ collections, # type: t.Iterable[Requirement]
+ search_paths, # type: t.Iterable[str]
+ apis, # type: t.Iterable[GalaxyAPI]
+ ignore_errors, # type: bool
+ local_verify_only, # type: bool
+ artifacts_manager, # type: ConcreteArtifactsManager
+): # type: (...) -> list[CollectionVerifyResult]
+ r"""Verify the integrity of locally installed collections.
+
+ :param collections: The collections to check.
+ :param search_paths: Locations for the local collection lookup.
+ :param apis: A list of GalaxyAPIs to query when searching for a collection.
+ :param ignore_errors: Whether to ignore any errors when verifying the collection.
+ :param local_verify_only: When True, skip downloads and only verify local manifests.
+ :param artifacts_manager: Artifacts manager.
+ :return: list of CollectionVerifyResult objects describing the results of each collection verification
+ """
+ results = [] # type: list[CollectionVerifyResult]
+
+ api_proxy = MultiGalaxyAPIProxy(apis, artifacts_manager)
+
+ with _display_progress():
+ for collection in collections:
+ try:
+ if collection.is_concrete_artifact:
+ raise AnsibleError(
+ message="'{coll_type!s}' type is not supported. "
+ 'The format namespace.name is expected.'.
+ format(coll_type=collection.type)
+ )
+
+ # NOTE: Verify local collection exists before
+ # NOTE: downloading its source artifact from
+ # NOTE: a galaxy server.
+ default_err = 'Collection %s is not installed in any of the collection paths.' % collection.fqcn
+ for search_path in search_paths:
+ b_search_path = to_bytes(
+ os.path.join(
+ search_path,
+ collection.namespace, collection.name,
+ ),
+ errors='surrogate_or_strict',
+ )
+ if not os.path.isdir(b_search_path):
+ continue
+ if not _is_installed_collection_dir(b_search_path):
+ default_err = (
+ "Collection %s does not have a MANIFEST.json. "
+ "A MANIFEST.json is expected if the collection has been built "
+ "and installed via ansible-galaxy" % collection.fqcn
+ )
+ continue
+
+ local_collection = Candidate.from_dir_path(
+ b_search_path, artifacts_manager,
+ )
+ supplemental_signatures = [
+ get_signature_from_source(source, display)
+ for source in collection.signature_sources or []
+ ]
+ local_collection = Candidate(
+ local_collection.fqcn,
+ local_collection.ver,
+ local_collection.src,
+ local_collection.type,
+ signatures=frozenset(supplemental_signatures),
+ )
+
+ break
+ else:
+ raise AnsibleError(message=default_err)
+
+ if local_verify_only:
+ remote_collection = None
+ else:
+ signatures = api_proxy.get_signatures(local_collection)
+ signatures.extend([
+ get_signature_from_source(source, display)
+ for source in collection.signature_sources or []
+ ])
+
+ remote_collection = Candidate(
+ collection.fqcn,
+ collection.ver if collection.ver != '*'
+ else local_collection.ver,
+ None, 'galaxy',
+ frozenset(signatures),
+ )
+
+ # Download collection on a galaxy server for comparison
+ try:
+ # NOTE: If there are no signatures, trigger the lookup. If found,
+ # NOTE: it'll cache download URL and token in artifact manager.
+ # NOTE: If there are no Galaxy server signatures, only user-provided signature URLs,
+ # NOTE: those alone validate the MANIFEST.json and the remote collection is not downloaded.
+ # NOTE: The remote MANIFEST.json is only used in verification if there are no signatures.
+ if not signatures and not collection.signature_sources:
+ api_proxy.get_collection_version_metadata(
+ remote_collection,
+ )
+ except AnsibleError as e: # FIXME: does this actually emit any errors?
+ # FIXME: extract the actual message and adjust this:
+ expected_error_msg = (
+ 'Failed to find collection {coll.fqcn!s}:{coll.ver!s}'.
+ format(coll=collection)
+ )
+ if e.message == expected_error_msg:
+ raise AnsibleError(
+ 'Failed to find remote collection '
+ "'{coll!s}' on any of the galaxy servers".
+ format(coll=collection)
+ )
+ raise
+
+ result = verify_local_collection(local_collection, remote_collection, artifacts_manager)
+
+ results.append(result)
+
+ except AnsibleError as err:
+ if ignore_errors:
+ display.warning(
+ "Failed to verify collection '{coll!s}' but skipping "
+ 'due to --ignore-errors being set. '
+ 'Error: {err!s}'.
+ format(coll=collection, err=to_text(err)),
+ )
+ else:
+ raise
+
+ return results
+
+
+@contextmanager
+def _tempdir():
+ b_temp_path = tempfile.mkdtemp(dir=to_bytes(C.DEFAULT_LOCAL_TMP, errors='surrogate_or_strict'))
+ try:
+ yield b_temp_path
+ finally:
+ shutil.rmtree(b_temp_path)
+
+
+@contextmanager
+def _display_progress(msg=None):
+ config_display = C.GALAXY_DISPLAY_PROGRESS
+ display_wheel = sys.stdout.isatty() if config_display is None else config_display
+
+ global display
+ if msg is not None:
+ display.display(msg)
+
+ if not display_wheel:
+ yield
+ return
+
+ def progress(display_queue, actual_display):
+ actual_display.debug("Starting display_progress display thread")
+ t = threading.current_thread()
+
+ while True:
+ for c in "|/-\\":
+ actual_display.display(c + "\b", newline=False)
+ time.sleep(0.1)
+
+ # Display a message from the main thread
+ while True:
+ try:
+ method, args, kwargs = display_queue.get(block=False, timeout=0.1)
+ except queue.Empty:
+ break
+ else:
+ func = getattr(actual_display, method)
+ func(*args, **kwargs)
+
+ if getattr(t, "finish", False):
+ actual_display.debug("Received end signal for display_progress display thread")
+ return
+
+ class DisplayThread(object):
+
+ def __init__(self, display_queue):
+ self.display_queue = display_queue
+
+ def __getattr__(self, attr):
+ def call_display(*args, **kwargs):
+ self.display_queue.put((attr, args, kwargs))
+
+ return call_display
+
+    # Temporarily override the global display class with our own, which adds the calls to a queue for the thread to call.
+ old_display = display
+ try:
+ display_queue = queue.Queue()
+ display = DisplayThread(display_queue)
+ t = threading.Thread(target=progress, args=(display_queue, old_display))
+ t.daemon = True
+ t.start()
+
+ try:
+ yield
+ finally:
+ t.finish = True
+ t.join()
+ except Exception:
+        # The exception is re-raised so we can be sure the thread has finished and is not using the display anymore
+ raise
+ finally:
+ display = old_display
+
+
+def _verify_file_hash(b_path, filename, expected_hash, error_queue):
+ b_file_path = to_bytes(os.path.join(to_text(b_path), filename), errors='surrogate_or_strict')
+
+ if not os.path.isfile(b_file_path):
+ actual_hash = None
+ else:
+ with open(b_file_path, mode='rb') as file_object:
+ actual_hash = _consume_file(file_object)
+
+ if expected_hash != actual_hash:
+ error_queue.append(ModifiedContent(filename=filename, expected=expected_hash, installed=actual_hash))
+
+
+def _make_manifest():
+ return {
+ 'files': [
+ {
+ 'name': '.',
+ 'ftype': 'dir',
+ 'chksum_type': None,
+ 'chksum_sha256': None,
+ 'format': MANIFEST_FORMAT,
+ },
+ ],
+ 'format': MANIFEST_FORMAT,
+ }
+
+
+def _make_entry(name, ftype, chksum_type='sha256', chksum=None):
+ return {
+ 'name': name,
+ 'ftype': ftype,
+ 'chksum_type': chksum_type if chksum else None,
+ f'chksum_{chksum_type}': chksum,
+ 'format': MANIFEST_FORMAT
+ }
+
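+# e.g. (hypothetical values) _make_entry('plugins/modules/foo.py', 'file', chksum='<hex digest>')
+# yields {'name': 'plugins/modules/foo.py', 'ftype': 'file', 'chksum_type': 'sha256',
+#         'chksum_sha256': '<hex digest>', 'format': MANIFEST_FORMAT}.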
+
+def _build_files_manifest(b_collection_path, namespace, name, ignore_patterns, manifest_control):
+ # type: (bytes, str, str, list[str], dict[str, t.Any]) -> FilesManifestType
+ if ignore_patterns and manifest_control is not Sentinel:
+ raise AnsibleError('"build_ignore" and "manifest" are mutually exclusive')
+
+ if manifest_control is not Sentinel:
+ return _build_files_manifest_distlib(
+ b_collection_path,
+ namespace,
+ name,
+ manifest_control,
+ )
+
+ return _build_files_manifest_walk(b_collection_path, namespace, name, ignore_patterns)
+
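+# A hypothetical galaxy.yml `manifest` section that would be routed to
+# _build_files_manifest_distlib (illustrative only):
+#   manifest:
+#     directives:
+#       - include data/*.json
+#     omit_default_directives: false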
+
+def _build_files_manifest_distlib(b_collection_path, namespace, name, manifest_control):
+ # type: (bytes, str, str, dict[str, t.Any]) -> FilesManifestType
+
+ if not HAS_DISTLIB:
+ raise AnsibleError('Use of "manifest" requires the python "distlib" library')
+
+ if manifest_control is None:
+ manifest_control = {}
+
+ try:
+ control = ManifestControl(**manifest_control)
+ except TypeError as ex:
+ raise AnsibleError(f'Invalid "manifest" provided: {ex}')
+
+ if not is_sequence(control.directives):
+ raise AnsibleError(f'"manifest.directives" must be a list, got: {control.directives.__class__.__name__}')
+
+ if not isinstance(control.omit_default_directives, bool):
+ raise AnsibleError(
+ '"manifest.omit_default_directives" is expected to be a boolean, got: '
+ f'{control.omit_default_directives.__class__.__name__}'
+ )
+
+ if control.omit_default_directives and not control.directives:
+ raise AnsibleError(
+ '"manifest.omit_default_directives" was set to True, but no directives were defined '
+ 'in "manifest.directives". This would produce an empty collection artifact.'
+ )
+
+ directives = []
+ if control.omit_default_directives:
+ directives.extend(control.directives)
+ else:
+ directives.extend([
+ 'include meta/*.yml',
+ 'include *.txt *.md *.rst COPYING LICENSE',
+ 'recursive-include tests **',
+ 'recursive-include docs **.rst **.yml **.yaml **.json **.j2 **.txt',
+ 'recursive-include roles **.yml **.yaml **.json **.j2',
+ 'recursive-include playbooks **.yml **.yaml **.json',
+ 'recursive-include changelogs **.yml **.yaml',
+ 'recursive-include plugins */**.py',
+ ])
+
+ plugins = set(l.package.split('.')[-1] for d, l in get_all_plugin_loaders())
+ for plugin in sorted(plugins):
+ if plugin in ('modules', 'module_utils'):
+ continue
+ elif plugin in C.DOCUMENTABLE_PLUGINS:
+ directives.append(
+ f'recursive-include plugins/{plugin} **.yml **.yaml'
+ )
+
+ directives.extend([
+ 'recursive-include plugins/modules **.ps1 **.yml **.yaml',
+ 'recursive-include plugins/module_utils **.ps1 **.psm1 **.cs',
+ ])
+
+ directives.extend(control.directives)
+
+ directives.extend([
+ f'exclude galaxy.yml galaxy.yaml MANIFEST.json FILES.json {namespace}-{name}-*.tar.gz',
+ 'recursive-exclude tests/output **',
+ 'global-exclude /.* /__pycache__',
+ ])
+
+ display.vvv('Manifest Directives:')
+ display.vvv(textwrap.indent('\n'.join(directives), ' '))
+
+ u_collection_path = to_text(b_collection_path, errors='surrogate_or_strict')
+ m = Manifest(u_collection_path)
+ for directive in directives:
+ try:
+ m.process_directive(directive)
+ except DistlibException as e:
+ raise AnsibleError(f'Invalid manifest directive: {e}')
+ except Exception as e:
+ raise AnsibleError(f'Unknown error processing manifest directive: {e}')
+
+ manifest = _make_manifest()
+
+ for abs_path in m.sorted(wantdirs=True):
+ rel_path = os.path.relpath(abs_path, u_collection_path)
+ if os.path.isdir(abs_path):
+ manifest_entry = _make_entry(rel_path, 'dir')
+ else:
+ manifest_entry = _make_entry(
+ rel_path,
+ 'file',
+ chksum_type='sha256',
+ chksum=secure_hash(abs_path, hash_func=sha256)
+ )
+
+ manifest['files'].append(manifest_entry)
+
+ return manifest
+
+
+def _build_files_manifest_walk(b_collection_path, namespace, name, ignore_patterns):
+ # type: (bytes, str, str, list[str]) -> FilesManifestType
+ # We always ignore .pyc and .retry files as well as some well known version control directories. The ignore
+ # patterns can be extended by the build_ignore key in galaxy.yml
+ b_ignore_patterns = [
+ b'MANIFEST.json',
+ b'FILES.json',
+ b'galaxy.yml',
+ b'galaxy.yaml',
+ b'.git',
+ b'*.pyc',
+ b'*.retry',
+ b'tests/output', # Ignore ansible-test result output directory.
+ to_bytes('{0}-{1}-*.tar.gz'.format(namespace, name)), # Ignores previously built artifacts in the root dir.
+ ]
+ b_ignore_patterns += [to_bytes(p) for p in ignore_patterns]
+ b_ignore_dirs = frozenset([b'CVS', b'.bzr', b'.hg', b'.git', b'.svn', b'__pycache__', b'.tox'])
+
+ manifest = _make_manifest()
+
+ def _walk(b_path, b_top_level_dir):
+ for b_item in os.listdir(b_path):
+ b_abs_path = os.path.join(b_path, b_item)
+ b_rel_base_dir = b'' if b_path == b_top_level_dir else b_path[len(b_top_level_dir) + 1:]
+ b_rel_path = os.path.join(b_rel_base_dir, b_item)
+ rel_path = to_text(b_rel_path, errors='surrogate_or_strict')
+
+ if os.path.isdir(b_abs_path):
+ if any(b_item == b_path for b_path in b_ignore_dirs) or \
+ any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
+ display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
+ continue
+
+ if os.path.islink(b_abs_path):
+ b_link_target = os.path.realpath(b_abs_path)
+
+ if not _is_child_path(b_link_target, b_top_level_dir):
+ display.warning("Skipping '%s' as it is a symbolic link to a directory outside the collection"
+ % to_text(b_abs_path))
+ continue
+
+ manifest['files'].append(_make_entry(rel_path, 'dir'))
+
+ if not os.path.islink(b_abs_path):
+ _walk(b_abs_path, b_top_level_dir)
+ else:
+ if any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
+ display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
+ continue
+
+                # Handling of file symlinks occurs in _build_collection_tar; the manifest entry
+                # for a symlink is the same as for a normal file.
+ manifest['files'].append(
+ _make_entry(
+ rel_path,
+ 'file',
+ chksum_type='sha256',
+ chksum=secure_hash(b_abs_path, hash_func=sha256)
+ )
+ )
+
+ _walk(b_collection_path, b_collection_path)
+
+ return manifest
+
+
+# FIXME: accept a dict produced from `galaxy.yml` instead of separate args
+def _build_manifest(namespace, name, version, authors, readme, tags, description, license_file,
+ dependencies, repository, documentation, homepage, issues, **kwargs):
+ manifest = {
+ 'collection_info': {
+ 'namespace': namespace,
+ 'name': name,
+ 'version': version,
+ 'authors': authors,
+ 'readme': readme,
+ 'tags': tags,
+ 'description': description,
+ 'license': kwargs['license'],
+ 'license_file': license_file or None, # Handle galaxy.yml having an empty string (None)
+ 'dependencies': dependencies,
+ 'repository': repository,
+ 'documentation': documentation,
+ 'homepage': homepage,
+ 'issues': issues,
+ },
+ 'file_manifest_file': {
+ 'name': 'FILES.json',
+ 'ftype': 'file',
+ 'chksum_type': 'sha256',
+ 'chksum_sha256': None, # Filled out in _build_collection_tar
+ 'format': MANIFEST_FORMAT
+ },
+ 'format': MANIFEST_FORMAT,
+ }
+
+ return manifest
+
+
+def _build_collection_tar(
+ b_collection_path, # type: bytes
+ b_tar_path, # type: bytes
+ collection_manifest, # type: CollectionManifestType
+ file_manifest, # type: FilesManifestType
+): # type: (...) -> str
+ """Build a tar.gz collection artifact from the manifest data."""
+ files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
+ collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
+ collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
+
+ with _tempdir() as b_temp_path:
+ b_tar_filepath = os.path.join(b_temp_path, os.path.basename(b_tar_path))
+
+ with tarfile.open(b_tar_filepath, mode='w:gz') as tar_file:
+ # Add the MANIFEST.json and FILES.json file to the archive
+ for name, b in [(MANIFEST_FILENAME, collection_manifest_json), ('FILES.json', files_manifest_json)]:
+ b_io = BytesIO(b)
+ tar_info = tarfile.TarInfo(name)
+ tar_info.size = len(b)
+ tar_info.mtime = int(time.time())
+ tar_info.mode = 0o0644
+ tar_file.addfile(tarinfo=tar_info, fileobj=b_io)
+
+ for file_info in file_manifest['files']: # type: ignore[union-attr]
+ if file_info['name'] == '.':
+ continue
+
+ # arcname expects a native string, cannot be bytes
+ filename = to_native(file_info['name'], errors='surrogate_or_strict')
+ b_src_path = os.path.join(b_collection_path, to_bytes(filename, errors='surrogate_or_strict'))
+
+ def reset_stat(tarinfo):
+ if tarinfo.type != tarfile.SYMTYPE:
+ existing_is_exec = tarinfo.mode & stat.S_IXUSR
+ tarinfo.mode = 0o0755 if existing_is_exec or tarinfo.isdir() else 0o0644
+ tarinfo.uid = tarinfo.gid = 0
+ tarinfo.uname = tarinfo.gname = ''
+
+ return tarinfo
+
+ if os.path.islink(b_src_path):
+ b_link_target = os.path.realpath(b_src_path)
+ if _is_child_path(b_link_target, b_collection_path):
+ b_rel_path = os.path.relpath(b_link_target, start=os.path.dirname(b_src_path))
+
+ tar_info = tarfile.TarInfo(filename)
+ tar_info.type = tarfile.SYMTYPE
+ tar_info.linkname = to_native(b_rel_path, errors='surrogate_or_strict')
+ tar_info = reset_stat(tar_info)
+ tar_file.addfile(tarinfo=tar_info)
+
+ continue
+
+ # Dealing with a normal file, just add it by name.
+ tar_file.add(
+ to_native(os.path.realpath(b_src_path)),
+ arcname=filename,
+ recursive=False,
+ filter=reset_stat,
+ )
+
+ shutil.copy(to_native(b_tar_filepath), to_native(b_tar_path))
+ collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
+ collection_manifest['collection_info']['name'])
+ tar_path = to_text(b_tar_path)
+ display.display(u'Created collection for %s at %s' % (collection_name, tar_path))
+ return tar_path
+
+
+def _build_collection_dir(b_collection_path, b_collection_output, collection_manifest, file_manifest):
+ """Build a collection directory from the manifest data.
+
+ This should follow the same pattern as _build_collection_tar.
+ """
+ os.makedirs(b_collection_output, mode=0o0755)
+
+ files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
+ collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
+ collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
+
+ # Write contents to the files
+ for name, b in [(MANIFEST_FILENAME, collection_manifest_json), ('FILES.json', files_manifest_json)]:
+ b_path = os.path.join(b_collection_output, to_bytes(name, errors='surrogate_or_strict'))
+ with open(b_path, 'wb') as file_obj, BytesIO(b) as b_io:
+ shutil.copyfileobj(b_io, file_obj)
+
+ os.chmod(b_path, 0o0644)
+
+ base_directories = []
+ for file_info in sorted(file_manifest['files'], key=lambda x: x['name']):
+ if file_info['name'] == '.':
+ continue
+
+ src_file = os.path.join(b_collection_path, to_bytes(file_info['name'], errors='surrogate_or_strict'))
+ dest_file = os.path.join(b_collection_output, to_bytes(file_info['name'], errors='surrogate_or_strict'))
+
+ existing_is_exec = os.stat(src_file).st_mode & stat.S_IXUSR
+ mode = 0o0755 if existing_is_exec else 0o0644
+
+ if os.path.isdir(src_file):
+ mode = 0o0755
+ base_directories.append(src_file)
+ os.mkdir(dest_file, mode)
+ else:
+ shutil.copyfile(src_file, dest_file)
+
+ os.chmod(dest_file, mode)
+ collection_output = to_text(b_collection_output)
+ return collection_output
+
+
+def find_existing_collections(path, artifacts_manager):
+ """Locate all collections under a given path.
+
+ :param path: Collection dirs layout search path.
+ :param artifacts_manager: Artifacts manager.
+ """
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ # FIXME: consider using `glob.glob()` to simplify looping
+ for b_namespace in os.listdir(b_path):
+ b_namespace_path = os.path.join(b_path, b_namespace)
+ if os.path.isfile(b_namespace_path):
+ continue
+
+ # FIXME: consider feeding b_namespace_path to Candidate.from_dir_path to get subdirs automatically
+ for b_collection in os.listdir(b_namespace_path):
+ b_collection_path = os.path.join(b_namespace_path, b_collection)
+ if not os.path.isdir(b_collection_path):
+ continue
+
+ try:
+ req = Candidate.from_dir_path_as_unknown(b_collection_path, artifacts_manager)
+ except ValueError as val_err:
+ raise_from(AnsibleError(val_err), val_err)
+
+ display.vvv(
+ u"Found installed collection {coll!s} at '{path!s}'".
+ format(coll=to_text(req), path=to_text(req.src))
+ )
+ yield req
+
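+# Editor's sketch of consuming this generator (the path and the configured
+# 'artifacts_manager' are assumptions for illustration):
+#
+#   coll_root = '/usr/share/ansible/collections/ansible_collections'
+#   for candidate in find_existing_collections(coll_root, artifacts_manager):
+#       print(candidate.fqcn, candidate.ver)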
+
+def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses?
+ # type: (Candidate, str, ConcreteArtifactsManager) -> None
+ """Install a collection under a given path.
+
+ :param collection: Collection to be installed.
+ :param path: Collection dirs layout path.
+ :param artifacts_manager: Artifacts manager.
+ """
+ b_artifact_path = (
+ artifacts_manager.get_artifact_path if collection.is_concrete_artifact
+ else artifacts_manager.get_galaxy_artifact_path
+ )(collection)
+
+ collection_path = os.path.join(path, collection.namespace, collection.name)
+ b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
+ display.display(
+ u"Installing '{coll!s}' to '{path!s}'".
+ format(coll=to_text(collection), path=collection_path),
+ )
+
+ if os.path.exists(b_collection_path):
+ shutil.rmtree(b_collection_path)
+
+ if collection.is_dir:
+ install_src(collection, b_artifact_path, b_collection_path, artifacts_manager)
+ else:
+ install_artifact(
+ b_artifact_path,
+ b_collection_path,
+ artifacts_manager._b_working_directory,
+ collection.signatures,
+ artifacts_manager.keyring,
+ artifacts_manager.required_successful_signature_count,
+ artifacts_manager.ignore_signature_errors,
+ )
+ if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)):
+ write_source_metadata(
+ collection,
+ b_collection_path,
+ artifacts_manager
+ )
+
+ display.display(
+ '{coll!s} was installed successfully'.
+ format(coll=to_text(collection)),
+ )
+
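+# Rough call sequence for orientation (a sketch; 'candidate' is assumed to be
+# a resolved Candidate and 'artifacts_manager' a ConcreteArtifactsManager):
+#
+#   install(candidate, '/path/to/ansible_collections', artifacts_manager)
+#   # -> resolves the artifact path, removes any existing install dir, then
+#   #    delegates to install_src() for source dirs or install_artifact() for
+#   #    tarballs (optionally verifying MANIFEST.json with GnuPG first).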
+
+def write_source_metadata(collection, b_collection_path, artifacts_manager):
+ # type: (Candidate, bytes, ConcreteArtifactsManager) -> None
+ source_data = artifacts_manager.get_galaxy_artifact_source_info(collection)
+
+ b_yaml_source_data = to_bytes(yaml_dump(source_data), errors='surrogate_or_strict')
+ b_info_dest = collection.construct_galaxy_info_path(b_collection_path)
+ b_info_dir = os.path.split(b_info_dest)[0]
+
+ if os.path.exists(b_info_dir):
+ shutil.rmtree(b_info_dir)
+
+ try:
+ os.mkdir(b_info_dir, mode=0o0755)
+ with open(b_info_dest, mode='w+b') as fd:
+ fd.write(b_yaml_source_data)
+ os.chmod(b_info_dest, 0o0644)
+ except Exception:
+ # Ensure we don't leave the dir behind in case of a failure.
+ if os.path.isdir(b_info_dir):
+ shutil.rmtree(b_info_dir)
+ raise
+
+
+def verify_artifact_manifest(manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors):
+ # type: (str, list[str], str, str, list[str]) -> None
+ failed_verify = False
+ coll_path_parts = to_text(manifest_file, errors='surrogate_or_strict').split(os.path.sep)
+ collection_name = '%s.%s' % (coll_path_parts[-3], coll_path_parts[-2]) # get 'ns' and 'coll' from /path/to/ns/coll/MANIFEST.json
+ if not verify_file_signatures(collection_name, manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors):
+ raise AnsibleError(f"Not installing {collection_name} because GnuPG signature verification failed.")
+ display.vvvv(f"GnuPG signature verification succeeded for {collection_name}")
+
+
+def install_artifact(b_coll_targz_path, b_collection_path, b_temp_path, signatures, keyring, required_signature_count, ignore_signature_errors):
+ """Install a collection from tarball under a given path.
+
+ :param b_coll_targz_path: Collection tarball to be installed.
+ :param b_collection_path: Collection dirs layout path.
+ :param b_temp_path: Temporary dir path.
+ :param signatures: frozenset of signatures to verify the MANIFEST.json
+ :param keyring: The keyring used during GPG verification
+ :param required_signature_count: The number of signatures that must successfully verify the collection
+ :param ignore_signature_errors: GPG errors to ignore during signature verification
+ """
+ try:
+ with tarfile.open(b_coll_targz_path, mode='r') as collection_tar:
+ # Verify the signature on the MANIFEST.json before extracting anything else
+ _extract_tar_file(collection_tar, MANIFEST_FILENAME, b_collection_path, b_temp_path)
+
+ if keyring is not None:
+ manifest_file = os.path.join(to_text(b_collection_path, errors='surrogate_or_strict'), MANIFEST_FILENAME)
+ verify_artifact_manifest(manifest_file, signatures, keyring, required_signature_count, ignore_signature_errors)
+
+ files_member_obj = collection_tar.getmember('FILES.json')
+ with _tarfile_extract(collection_tar, files_member_obj) as (dummy, files_obj):
+ files = json.loads(to_text(files_obj.read(), errors='surrogate_or_strict'))
+
+ _extract_tar_file(collection_tar, 'FILES.json', b_collection_path, b_temp_path)
+
+ for file_info in files['files']:
+ file_name = file_info['name']
+ if file_name == '.':
+ continue
+
+ if file_info['ftype'] == 'file':
+ _extract_tar_file(collection_tar, file_name, b_collection_path, b_temp_path,
+ expected_hash=file_info['chksum_sha256'])
+
+ else:
+ _extract_tar_dir(collection_tar, file_name, b_collection_path)
+
+ except Exception:
+ # Ensure we don't leave the dir behind in case of a failure.
+ shutil.rmtree(b_collection_path)
+
+ b_namespace_path = os.path.dirname(b_collection_path)
+ if not os.listdir(b_namespace_path):
+ os.rmdir(b_namespace_path)
+
+ raise
+
+
+def install_src(collection, b_collection_path, b_collection_output_path, artifacts_manager):
+ r"""Install the collection from source control into given dir.
+
+ Generates the Ansible collection artifact data from a galaxy.yml and
+ installs the artifact to a directory.
+ This should follow the same pattern as build_collection, but instead
+ of creating an artifact, install it.
+
+ :param collection: Collection to be installed.
+ :param b_collection_path: Collection dirs layout path.
+ :param b_collection_output_path: The installation directory for the \
+ collection artifact.
+ :param artifacts_manager: Artifacts manager.
+
+ :raises AnsibleError: If no collection metadata found.
+ """
+ collection_meta = artifacts_manager.get_direct_collection_meta(collection)
+
+ if 'build_ignore' not in collection_meta: # installed collection, not src
+ # FIXME: optimize this? use a different process? copy instead of build?
+ collection_meta['build_ignore'] = []
+ collection_meta['manifest'] = Sentinel
+ collection_manifest = _build_manifest(**collection_meta)
+ file_manifest = _build_files_manifest(
+ b_collection_path,
+ collection_meta['namespace'], collection_meta['name'],
+ collection_meta['build_ignore'],
+ collection_meta['manifest'],
+ )
+
+ collection_output_path = _build_collection_dir(
+ b_collection_path, b_collection_output_path,
+ collection_manifest, file_manifest,
+ )
+
+ display.display(
+ 'Created collection for {coll!s} at {path!s}'.
+ format(coll=collection, path=collection_output_path)
+ )
+
+
+def _extract_tar_dir(tar, dirname, b_dest):
+ """ Extracts a directory from a collection tar. """
+ member_names = [to_native(dirname, errors='surrogate_or_strict')]
+
+ # Create list of members with and without trailing separator
+ if not member_names[-1].endswith(os.path.sep):
+ member_names.append(member_names[-1] + os.path.sep)
+
+    # Try all of the member names and stop on the first one we are able to successfully get
+ for member in member_names:
+ try:
+ tar_member = tar.getmember(member)
+ except KeyError:
+ continue
+ break
+ else:
+ # If we still can't find the member, raise a nice error.
+ raise AnsibleError("Unable to extract '%s' from collection" % to_native(member, errors='surrogate_or_strict'))
+
+ b_dir_path = os.path.join(b_dest, to_bytes(dirname, errors='surrogate_or_strict'))
+
+ b_parent_path = os.path.dirname(b_dir_path)
+ try:
+ os.makedirs(b_parent_path, mode=0o0755)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ if tar_member.type == tarfile.SYMTYPE:
+ b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
+ if not _is_child_path(b_link_path, b_dest, link_name=b_dir_path):
+ raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
+ "collection '%s'" % (to_native(dirname), b_link_path))
+
+ os.symlink(b_link_path, b_dir_path)
+
+ else:
+ if not os.path.isdir(b_dir_path):
+ os.mkdir(b_dir_path, 0o0755)
+
+
+def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
+ """ Extracts a file from a collection tar. """
+ with _get_tar_file_member(tar, filename) as (tar_member, tar_obj):
+ if tar_member.type == tarfile.SYMTYPE:
+ actual_hash = _consume_file(tar_obj)
+
+ else:
+ with tempfile.NamedTemporaryFile(dir=b_temp_path, delete=False) as tmpfile_obj:
+ actual_hash = _consume_file(tar_obj, tmpfile_obj)
+
+ if expected_hash and actual_hash != expected_hash:
+ raise AnsibleError("Checksum mismatch for '%s' inside collection at '%s'"
+ % (to_native(filename, errors='surrogate_or_strict'), to_native(tar.name)))
+
+ b_dest_filepath = os.path.abspath(os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict')))
+ b_parent_dir = os.path.dirname(b_dest_filepath)
+ if not _is_child_path(b_parent_dir, b_dest):
+ raise AnsibleError("Cannot extract tar entry '%s' as it will be placed outside the collection directory"
+ % to_native(filename, errors='surrogate_or_strict'))
+
+ if not os.path.exists(b_parent_dir):
+ # Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check
+ # makes sure we create the parent directory even if it wasn't set in the metadata.
+ os.makedirs(b_parent_dir, mode=0o0755)
+
+ if tar_member.type == tarfile.SYMTYPE:
+ b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
+ if not _is_child_path(b_link_path, b_dest, link_name=b_dest_filepath):
+ raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
+ "collection '%s'" % (to_native(filename), b_link_path))
+
+ os.symlink(b_link_path, b_dest_filepath)
+
+ else:
+ shutil.move(to_bytes(tmpfile_obj.name, errors='surrogate_or_strict'), b_dest_filepath)
+
+ # Default to rw-r--r-- and only add execute if the tar file has execute.
+ tar_member = tar.getmember(to_native(filename, errors='surrogate_or_strict'))
+ new_mode = 0o644
+ if stat.S_IMODE(tar_member.mode) & stat.S_IXUSR:
+ new_mode |= 0o0111
+
+ os.chmod(b_dest_filepath, new_mode)
+
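+# Editor's sketch (illustrative; 'tar' is an open tarfile.TarFile and
+# 'file_info' an entry from FILES.json):
+#
+#   _extract_tar_file(tar, file_info['name'], b_dest, b_temp_path,
+#                     expected_hash=file_info['chksum_sha256'])
+#   # Raises AnsibleError on a checksum mismatch or on any entry/symlink that
+#   # would land outside the collection directory.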
+
+def _get_tar_file_member(tar, filename):
+ n_filename = to_native(filename, errors='surrogate_or_strict')
+ try:
+ member = tar.getmember(n_filename)
+ except KeyError:
+ raise AnsibleError("Collection tar at '%s' does not contain the expected file '%s'." % (
+ to_native(tar.name),
+ n_filename))
+
+ return _tarfile_extract(tar, member)
+
+
+def _get_json_from_tar_file(b_path, filename):
+    # Accumulate raw bytes and decode once at the end so that multi-byte
+    # characters cannot be split across chunk boundaries.
+    file_contents = b''
+
+    with tarfile.open(b_path, mode='r') as collection_tar:
+        with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
+            bufsize = 65536
+            data = tar_obj.read(bufsize)
+            while data:
+                file_contents += data
+                data = tar_obj.read(bufsize)
+
+    return json.loads(to_text(file_contents, errors='surrogate_or_strict'))
+
+
+def _get_tar_file_hash(b_path, filename):
+ with tarfile.open(b_path, mode='r') as collection_tar:
+ with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
+ return _consume_file(tar_obj)
+
+
+def _get_file_hash(b_path, filename): # type: (bytes, str) -> str
+ filepath = os.path.join(b_path, to_bytes(filename, errors='surrogate_or_strict'))
+ with open(filepath, 'rb') as fp:
+ return _consume_file(fp)
+
+
+def _is_child_path(path, parent_path, link_name=None):
+ """ Checks that path is a path within the parent_path specified. """
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ if link_name and not os.path.isabs(b_path):
+ # If link_name is specified, path is the source of the link and we need to resolve the absolute path.
+ b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))
+ b_path = os.path.abspath(os.path.join(b_link_dir, b_path))
+
+ b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict')
+ return b_path == b_parent_path or b_path.startswith(b_parent_path + to_bytes(os.path.sep))
+
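+# Behaviour sketch (paths are illustrative):
+#
+#   _is_child_path(b'/coll/roles/x', b'/coll')                       # True
+#   _is_child_path(b'/etc/passwd', b'/coll')                         # False
+#   _is_child_path(b'../escape', b'/coll', link_name=b'/coll/lnk')   # False:
+#   # the relative link source resolves to '/escape', outside '/coll'.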
+
+def _resolve_depenency_map(
+ requested_requirements, # type: t.Iterable[Requirement]
+ galaxy_apis, # type: t.Iterable[GalaxyAPI]
+ concrete_artifacts_manager, # type: ConcreteArtifactsManager
+ preferred_candidates, # type: t.Iterable[Candidate] | None
+ no_deps, # type: bool
+ allow_pre_release, # type: bool
+ upgrade, # type: bool
+ include_signatures, # type: bool
+ offline, # type: bool
+): # type: (...) -> dict[str, Candidate]
+ """Return the resolved dependency map."""
+ if not HAS_RESOLVELIB:
+ raise AnsibleError("Failed to import resolvelib, check that a supported version is installed")
+ if not HAS_PACKAGING:
+ raise AnsibleError("Failed to import packaging, check that a supported version is installed")
+
+ req = None
+
+ try:
+ dist = distribution('ansible-core')
+ except Exception:
+ pass
+ else:
+ req = next((rr for r in (dist.requires or []) if (rr := PkgReq(r)).name == 'resolvelib'), None)
+ finally:
+ if req is None:
+ # TODO: replace the hardcoded versions with a warning if the dist info is missing
+ # display.warning("Unable to find 'ansible-core' distribution requirements to verify the resolvelib version is supported.")
+ if not RESOLVELIB_LOWERBOUND <= RESOLVELIB_VERSION < RESOLVELIB_UPPERBOUND:
+ raise AnsibleError(
+ f"ansible-galaxy requires resolvelib<{RESOLVELIB_UPPERBOUND.vstring},>={RESOLVELIB_LOWERBOUND.vstring}"
+ )
+ elif not req.specifier.contains(RESOLVELIB_VERSION.vstring):
+ raise AnsibleError(f"ansible-galaxy requires {req.name}{req.specifier}")
+
+ collection_dep_resolver = build_collection_dependency_resolver(
+ galaxy_apis=galaxy_apis,
+ concrete_artifacts_manager=concrete_artifacts_manager,
+ user_requirements=requested_requirements,
+ preferred_candidates=preferred_candidates,
+ with_deps=not no_deps,
+ with_pre_releases=allow_pre_release,
+ upgrade=upgrade,
+ include_signatures=include_signatures,
+ offline=offline,
+ )
+ try:
+ return collection_dep_resolver.resolve(
+ requested_requirements,
+ max_rounds=2000000, # NOTE: same constant pip uses
+ ).mapping
+ except CollectionDependencyResolutionImpossible as dep_exc:
+ conflict_causes = (
+ '* {req.fqcn!s}:{req.ver!s} ({dep_origin!s})'.format(
+ req=req_inf.requirement,
+ dep_origin='direct request'
+ if req_inf.parent is None
+ else 'dependency of {parent!s}'.
+ format(parent=req_inf.parent),
+ )
+ for req_inf in dep_exc.causes
+ )
+ error_msg_lines = list(chain(
+ (
+ 'Failed to resolve the requested '
+ 'dependencies map. Could not satisfy the following '
+ 'requirements:',
+ ),
+ conflict_causes,
+ ))
+ raise raise_from( # NOTE: Leading "raise" is a hack for mypy bug #9717
+ AnsibleError('\n'.join(error_msg_lines)),
+ dep_exc,
+ )
+ except CollectionDependencyInconsistentCandidate as dep_exc:
+ parents = [
+ "%s.%s:%s" % (p.namespace, p.name, p.ver)
+ for p in dep_exc.criterion.iter_parent()
+ if p is not None
+ ]
+
+ error_msg_lines = [
+ (
+ 'Failed to resolve the requested dependencies map. '
+ 'Got the candidate {req.fqcn!s}:{req.ver!s} ({dep_origin!s}) '
+ 'which didn\'t satisfy all of the following requirements:'.
+ format(
+ req=dep_exc.candidate,
+ dep_origin='direct request'
+ if not parents else 'dependency of {parent!s}'.
+ format(parent=', '.join(parents))
+ )
+ )
+ ]
+
+ for req in dep_exc.criterion.iter_requirement():
+ error_msg_lines.append(
+ '* {req.fqcn!s}:{req.ver!s}'.format(req=req)
+ )
+
+ raise raise_from( # NOTE: Leading "raise" is a hack for mypy bug #9717
+ AnsibleError('\n'.join(error_msg_lines)),
+ dep_exc,
+ )
+ except ValueError as exc:
+ raise AnsibleError(to_native(exc)) from exc
diff --git a/lib/ansible/galaxy/collection/concrete_artifact_manager.py b/lib/ansible/galaxy/collection/concrete_artifact_manager.py
new file mode 100644
index 0000000..7c920b8
--- /dev/null
+++ b/lib/ansible/galaxy/collection/concrete_artifact_manager.py
@@ -0,0 +1,755 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Concrete collection candidate management helper module."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import tarfile
+import subprocess
+import typing as t
+
+from contextlib import contextmanager
+from hashlib import sha256
+from urllib.error import URLError
+from urllib.parse import urldefrag
+from shutil import rmtree
+from tempfile import mkdtemp
+
+if t.TYPE_CHECKING:
+ from ansible.galaxy.dependency_resolution.dataclasses import (
+ Candidate, Requirement,
+ )
+ from ansible.galaxy.token import GalaxyToken
+
+from ansible.errors import AnsibleError
+from ansible.galaxy import get_collections_galaxy_meta_info
+from ansible.galaxy.dependency_resolution.dataclasses import _GALAXY_YAML
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.module_utils.common.yaml import yaml_load
+from ansible.module_utils.six import raise_from
+from ansible.module_utils.urls import open_url
+from ansible.utils.display import Display
+from ansible.utils.sentinel import Sentinel
+
+import yaml
+
+
+display = Display()
+
+MANIFEST_FILENAME = 'MANIFEST.json'
+
+
+class ConcreteArtifactsManager:
+ """Manager for on-disk collection artifacts.
+
+ It is responsible for:
+ * downloading remote collections from Galaxy-compatible servers and
+ direct links to tarballs or SCM repositories
+ * keeping track of local ones
+    * keeping track of Galaxy API tokens for downloads from Galaxy'ish
+      servers, as well as the artifact hashes
+    * keeping track of Galaxy API signatures for downloads from Galaxy'ish
+      servers
+    * caching all of the above
+ * retrieving the metadata out of the downloaded artifacts
+ """
+ def __init__(self, b_working_directory, validate_certs=True, keyring=None, timeout=60, required_signature_count=None, ignore_signature_errors=None):
+ # type: (bytes, bool, str, int, str, list[str]) -> None
+ """Initialize ConcreteArtifactsManager caches and costraints."""
+ self._validate_certs = validate_certs # type: bool
+ self._artifact_cache = {} # type: dict[bytes, bytes]
+ self._galaxy_artifact_cache = {} # type: dict[Candidate | Requirement, bytes]
+ self._artifact_meta_cache = {} # type: dict[bytes, dict[str, str | list[str] | dict[str, str] | None | t.Type[Sentinel]]]
+ self._galaxy_collection_cache = {} # type: dict[Candidate | Requirement, tuple[str, str, GalaxyToken]]
+ self._galaxy_collection_origin_cache = {} # type: dict[Candidate, tuple[str, list[dict[str, str]]]]
+ self._b_working_directory = b_working_directory # type: bytes
+ self._supplemental_signature_cache = {} # type: dict[str, str]
+ self._keyring = keyring # type: str
+ self.timeout = timeout # type: int
+ self._required_signature_count = required_signature_count # type: str
+ self._ignore_signature_errors = ignore_signature_errors # type: list[str]
+ self._require_build_metadata = True # type: bool
+
+ @property
+ def keyring(self):
+ return self._keyring
+
+ @property
+ def required_successful_signature_count(self):
+ return self._required_signature_count
+
+ @property
+ def ignore_signature_errors(self):
+ if self._ignore_signature_errors is None:
+ return []
+ return self._ignore_signature_errors
+
+ @property
+ def require_build_metadata(self):
+ # type: () -> bool
+ return self._require_build_metadata
+
+ @require_build_metadata.setter
+ def require_build_metadata(self, value):
+ # type: (bool) -> None
+ self._require_build_metadata = value
+
+ def get_galaxy_artifact_source_info(self, collection):
+ # type: (Candidate) -> dict[str, t.Union[str, list[dict[str, str]]]]
+ server = collection.src.api_server
+
+ try:
+ download_url = self._galaxy_collection_cache[collection][0]
+ signatures_url, signatures = self._galaxy_collection_origin_cache[collection]
+ except KeyError as key_err:
+ raise RuntimeError(
+                'There is no known source for {coll!s}'.
+ format(coll=collection),
+ ) from key_err
+
+ return {
+ "format_version": "1.0.0",
+ "namespace": collection.namespace,
+ "name": collection.name,
+ "version": collection.ver,
+ "server": server,
+ "version_url": signatures_url,
+ "download_url": download_url,
+ "signatures": signatures,
+ }
+
+ def get_galaxy_artifact_path(self, collection):
+ # type: (t.Union[Candidate, Requirement]) -> bytes
+ """Given a Galaxy-stored collection, return a cached path.
+
+ If it's not yet on disk, this method downloads the artifact first.
+ """
+ try:
+ return self._galaxy_artifact_cache[collection]
+ except KeyError:
+ pass
+
+ try:
+ url, sha256_hash, token = self._galaxy_collection_cache[collection]
+ except KeyError as key_err:
+ raise_from(
+ RuntimeError(
+                    'There is no known source for {coll!s}'.
+ format(coll=collection),
+ ),
+ key_err,
+ )
+
+ display.vvvv(
+ "Fetching a collection tarball for '{collection!s}' from "
+ 'Ansible Galaxy'.format(collection=collection),
+ )
+
+ try:
+ b_artifact_path = _download_file(
+ url,
+ self._b_working_directory,
+ expected_hash=sha256_hash,
+ validate_certs=self._validate_certs,
+ token=token,
+ ) # type: bytes
+ except URLError as err:
+ raise_from(
+ AnsibleError(
+ 'Failed to download collection tar '
+ "from '{coll_src!s}': {download_err!s}".
+ format(
+ coll_src=to_native(collection.src),
+ download_err=to_native(err),
+ ),
+ ),
+ err,
+ )
+ else:
+ display.vvv(
+ "Collection '{coll!s}' obtained from "
+ 'server {server!s} {url!s}'.format(
+ coll=collection, server=collection.src or 'Galaxy',
+ url=collection.src.api_server if collection.src is not None
+ else '',
+ )
+ )
+
+ self._galaxy_artifact_cache[collection] = b_artifact_path
+ return b_artifact_path
+
+ def get_artifact_path(self, collection):
+ # type: (t.Union[Candidate, Requirement]) -> bytes
+ """Given a concrete collection pointer, return a cached path.
+
+ If it's not yet on disk, this method downloads the artifact first.
+ """
+ try:
+ return self._artifact_cache[collection.src]
+ except KeyError:
+ pass
+
+ # NOTE: SCM needs to be special-cased as it may contain either
+ # NOTE: one collection in its root, or a number of top-level
+ # NOTE: collection directories instead.
+ # NOTE: The idea is to store the SCM collection as unpacked
+ # NOTE: directory structure under the temporary location and use
+ # NOTE: a "virtual" collection that has pinned requirements on
+ # NOTE: the directories under that SCM checkout that correspond
+ # NOTE: to collections.
+ # NOTE: This brings us to the idea that we need two separate
+ # NOTE: virtual Requirement/Candidate types --
+ # NOTE: (single) dir + (multidir) subdirs
+ if collection.is_url:
+ display.vvvv(
+ "Collection requirement '{collection!s}' is a URL "
+ 'to a tar artifact'.format(collection=collection.fqcn),
+ )
+ try:
+ b_artifact_path = _download_file(
+ collection.src,
+ self._b_working_directory,
+ expected_hash=None, # NOTE: URLs don't support checksums
+ validate_certs=self._validate_certs,
+ timeout=self.timeout
+ )
+ except Exception as err:
+ raise_from(
+ AnsibleError(
+ 'Failed to download collection tar '
+ "from '{coll_src!s}': {download_err!s}".
+ format(
+ coll_src=to_native(collection.src),
+ download_err=to_native(err),
+ ),
+ ),
+ err,
+ )
+ elif collection.is_scm:
+ b_artifact_path = _extract_collection_from_git(
+ collection.src,
+ collection.ver,
+ self._b_working_directory,
+ )
+ elif collection.is_file or collection.is_dir or collection.is_subdirs:
+ b_artifact_path = to_bytes(collection.src)
+ else:
+ # NOTE: This may happen `if collection.is_online_index_pointer`
+ raise RuntimeError(
+ 'The artifact is of an unexpected type {art_type!s}'.
+ format(art_type=collection.type)
+ )
+
+ self._artifact_cache[collection.src] = b_artifact_path
+ return b_artifact_path
+
+ def _get_direct_collection_namespace(self, collection):
+ # type: (Candidate) -> t.Optional[str]
+ return self.get_direct_collection_meta(collection)['namespace'] # type: ignore[return-value]
+
+ def _get_direct_collection_name(self, collection):
+ # type: (Candidate) -> t.Optional[str]
+ return self.get_direct_collection_meta(collection)['name'] # type: ignore[return-value]
+
+ def get_direct_collection_fqcn(self, collection):
+ # type: (Candidate) -> t.Optional[str]
+ """Extract FQCN from the given on-disk collection artifact.
+
+ If the collection is virtual, ``None`` is returned instead
+ of a string.
+ """
+ if collection.is_virtual:
+ # NOTE: should it be something like "<virtual>"?
+ return None
+
+ return '.'.join(( # type: ignore[type-var]
+ self._get_direct_collection_namespace(collection), # type: ignore[arg-type]
+ self._get_direct_collection_name(collection),
+ ))
+
+ def get_direct_collection_version(self, collection):
+ # type: (t.Union[Candidate, Requirement]) -> str
+ """Extract version from the given on-disk collection artifact."""
+ return self.get_direct_collection_meta(collection)['version'] # type: ignore[return-value]
+
+ def get_direct_collection_dependencies(self, collection):
+ # type: (t.Union[Candidate, Requirement]) -> dict[str, str]
+ """Extract deps from the given on-disk collection artifact."""
+ collection_dependencies = self.get_direct_collection_meta(collection)['dependencies']
+ if collection_dependencies is None:
+ collection_dependencies = {}
+ return collection_dependencies # type: ignore[return-value]
+
+ def get_direct_collection_meta(self, collection):
+ # type: (t.Union[Candidate, Requirement]) -> dict[str, t.Union[str, dict[str, str], list[str], None, t.Type[Sentinel]]]
+ """Extract meta from the given on-disk collection artifact."""
+ try: # FIXME: use unique collection identifier as a cache key?
+ return self._artifact_meta_cache[collection.src]
+ except KeyError:
+ b_artifact_path = self.get_artifact_path(collection)
+
+ if collection.is_url or collection.is_file:
+ collection_meta = _get_meta_from_tar(b_artifact_path)
+ elif collection.is_dir: # should we just build a coll instead?
+ # FIXME: what if there's subdirs?
+ try:
+ collection_meta = _get_meta_from_dir(b_artifact_path, self.require_build_metadata)
+ except LookupError as lookup_err:
+ raise_from(
+ AnsibleError(
+ 'Failed to find the collection dir deps: {err!s}'.
+ format(err=to_native(lookup_err)),
+ ),
+ lookup_err,
+ )
+ elif collection.is_scm:
+ collection_meta = {
+ 'name': None,
+ 'namespace': None,
+ 'dependencies': {to_native(b_artifact_path): '*'},
+ 'version': '*',
+ }
+ elif collection.is_subdirs:
+ collection_meta = {
+ 'name': None,
+ 'namespace': None,
+ # NOTE: Dropping b_artifact_path since it's based on src anyway
+ 'dependencies': dict.fromkeys(
+ map(to_native, collection.namespace_collection_paths),
+ '*',
+ ),
+ 'version': '*',
+ }
+ else:
+ raise RuntimeError
+
+ self._artifact_meta_cache[collection.src] = collection_meta
+ return collection_meta
+
+ def save_collection_source(self, collection, url, sha256_hash, token, signatures_url, signatures):
+ # type: (Candidate, str, str, GalaxyToken, str, list[dict[str, str]]) -> None
+ """Store collection URL, SHA256 hash and Galaxy API token.
+
+ This is a hook that is supposed to be called before attempting to
+ download Galaxy-based collections with ``get_galaxy_artifact_path()``.
+ """
+ self._galaxy_collection_cache[collection] = url, sha256_hash, token
+ self._galaxy_collection_origin_cache[collection] = signatures_url, signatures
+
+ @classmethod
+ @contextmanager
+ def under_tmpdir(
+ cls,
+ temp_dir_base, # type: str
+ validate_certs=True, # type: bool
+ keyring=None, # type: str
+ required_signature_count=None, # type: str
+ ignore_signature_errors=None, # type: list[str]
+ require_build_metadata=True, # type: bool
+ ): # type: (...) -> t.Iterator[ConcreteArtifactsManager]
+ """Custom ConcreteArtifactsManager constructor with temp dir.
+
+ This method returns a context manager that allocates and cleans
+ up a temporary directory for caching the collection artifacts
+ during the dependency resolution process.
+ """
+ # NOTE: Can't use `with tempfile.TemporaryDirectory:`
+ # NOTE: because it's not in Python 2 stdlib.
+ temp_path = mkdtemp(
+ dir=to_bytes(temp_dir_base, errors='surrogate_or_strict'),
+ )
+ b_temp_path = to_bytes(temp_path, errors='surrogate_or_strict')
+ try:
+ yield cls(
+ b_temp_path,
+ validate_certs,
+ keyring=keyring,
+ required_signature_count=required_signature_count,
+ ignore_signature_errors=ignore_signature_errors
+ )
+ finally:
+ rmtree(b_temp_path)
+
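+# Typical use as a context manager (a sketch; argument values are examples):
+#
+#   with ConcreteArtifactsManager.under_tmpdir('/tmp', validate_certs=True) as cam:
+#       b_path = cam.get_artifact_path(candidate)
+#   # The temporary working directory is removed again on exit.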
+
+def parse_scm(collection, version):
+ """Extract name, version, path and subdir out of the SCM pointer."""
+ if ',' in collection:
+ collection, version = collection.split(',', 1)
+ elif version == '*' or not version:
+ version = 'HEAD'
+
+ if collection.startswith('git+'):
+ path = collection[4:]
+ else:
+ path = collection
+
+ path, fragment = urldefrag(path)
+ fragment = fragment.strip(os.path.sep)
+
+ if path.endswith(os.path.sep + '.git'):
+ name = path.split(os.path.sep)[-2]
+ elif '://' not in path and '@' not in path:
+ name = path
+ else:
+ name = path.split('/')[-1]
+ if name.endswith('.git'):
+ name = name[:-4]
+
+ return name, version, path, fragment
+
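+# Examples of the SCM pointer formats this accepts (results illustrative):
+#
+#   parse_scm('git+https://github.com/org/repo.git,v1.2.3', '*')
+#   # -> ('repo', 'v1.2.3', 'https://github.com/org/repo.git', '')
+#   parse_scm('https://github.com/org/repo.git#collections/my_coll', 'HEAD')
+#   # -> ('repo', 'HEAD', 'https://github.com/org/repo.git', 'collections/my_coll')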
+
+def _extract_collection_from_git(repo_url, coll_ver, b_path):
+ name, version, git_url, fragment = parse_scm(repo_url, coll_ver)
+ b_checkout_path = mkdtemp(
+ dir=b_path,
+ prefix=to_bytes(name, errors='surrogate_or_strict'),
+ ) # type: bytes
+
+ try:
+ git_executable = get_bin_path('git')
+ except ValueError as err:
+ raise AnsibleError(
+ "Could not find git executable to extract the collection from the Git repository `{repo_url!s}`.".
+ format(repo_url=to_native(git_url))
+ ) from err
+
+ # Perform a shallow clone if simply cloning HEAD
+ if version == 'HEAD':
+ git_clone_cmd = git_executable, 'clone', '--depth=1', git_url, to_text(b_checkout_path)
+ else:
+ git_clone_cmd = git_executable, 'clone', git_url, to_text(b_checkout_path)
+ # FIXME: '--branch', version
+
+ try:
+ subprocess.check_call(git_clone_cmd)
+ except subprocess.CalledProcessError as proc_err:
+ raise_from(
+ AnsibleError( # should probably be LookupError
+ 'Failed to clone a Git repository from `{repo_url!s}`.'.
+ format(repo_url=to_native(git_url)),
+ ),
+ proc_err,
+ )
+
+ git_switch_cmd = git_executable, 'checkout', to_text(version)
+ try:
+ subprocess.check_call(git_switch_cmd, cwd=b_checkout_path)
+ except subprocess.CalledProcessError as proc_err:
+ raise_from(
+ AnsibleError( # should probably be LookupError
+ 'Failed to switch a cloned Git repo `{repo_url!s}` '
+ 'to the requested revision `{commitish!s}`.'.
+ format(
+ commitish=to_native(version),
+ repo_url=to_native(git_url),
+ ),
+ ),
+ proc_err,
+ )
+
+ return (
+ os.path.join(b_checkout_path, to_bytes(fragment))
+ if fragment else b_checkout_path
+ )
+
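+# Roughly equivalent shell steps (illustrative; <git_url>, <version> and
+# <tmpdir> are placeholders):
+#
+#   git clone --depth=1 <git_url> <tmpdir>   # only when version == 'HEAD'
+#   git clone <git_url> <tmpdir>             # otherwise, followed by:
+#   git checkout <version>                   # run inside <tmpdir>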
+
+# FIXME: use random subdirs while preserving the file names
+def _download_file(url, b_path, expected_hash, validate_certs, token=None, timeout=60):
+ # type: (str, bytes, t.Optional[str], bool, GalaxyToken, int) -> bytes
+ # ^ NOTE: used in download and verify_collections ^
+ b_tarball_name = to_bytes(
+ url.rsplit('/', 1)[1], errors='surrogate_or_strict',
+ )
+ b_file_name = b_tarball_name[:-len('.tar.gz')]
+
+ b_tarball_dir = mkdtemp(
+ dir=b_path,
+ prefix=b'-'.join((b_file_name, b'')),
+ ) # type: bytes
+
+ b_file_path = os.path.join(b_tarball_dir, b_tarball_name)
+
+ display.display("Downloading %s to %s" % (url, to_text(b_tarball_dir)))
+ # NOTE: Galaxy redirects downloads to S3 which rejects the request
+ # NOTE: if an Authorization header is attached so don't redirect it
+ resp = open_url(
+ to_native(url, errors='surrogate_or_strict'),
+ validate_certs=validate_certs,
+ headers=None if token is None else token.headers(),
+ unredirected_headers=['Authorization'], http_agent=user_agent(),
+ timeout=timeout
+ )
+
+ with open(b_file_path, 'wb') as download_file: # type: t.BinaryIO
+ actual_hash = _consume_file(resp, write_to=download_file)
+
+ if expected_hash:
+ display.vvvv(
+ 'Validating downloaded file hash {actual_hash!s} with '
+ 'expected hash {expected_hash!s}'.
+ format(actual_hash=actual_hash, expected_hash=expected_hash)
+ )
+ if expected_hash != actual_hash:
+            raise AnsibleError('Checksum of the downloaded artifact does not match the expected hash')
+
+ return b_file_path
+
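+# Call sketch (the URL and hash are placeholders, not real endpoints):
+#
+#   b_tar = _download_file(
+#       'https://galaxy.example.com/download/ns-coll-1.0.0.tar.gz',
+#       b_working_dir, expected_hash='<sha256 hexdigest>',
+#       validate_certs=True, token=None,
+#   )
+#   # Raises AnsibleError if the computed SHA-256 differs from expected_hash.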
+
+def _consume_file(read_from, write_to=None):
+ # type: (t.BinaryIO, t.BinaryIO) -> str
+ bufsize = 65536
+ sha256_digest = sha256()
+ data = read_from.read(bufsize)
+ while data:
+ if write_to is not None:
+ write_to.write(data)
+ write_to.flush()
+ sha256_digest.update(data)
+ data = read_from.read(bufsize)
+
+ return sha256_digest.hexdigest()
+
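+# Two usage modes (sketch): hash only, or hash while also copying the stream.
+#
+#   digest = _consume_file(resp)                       # sha256 hexdigest only
+#   digest = _consume_file(resp, write_to=local_file)  # tee into local_file too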
+
+def _normalize_galaxy_yml_manifest(
+ galaxy_yml, # type: dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
+ b_galaxy_yml_path, # type: bytes
+ require_build_metadata=True, # type: bool
+):
+ # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
+ galaxy_yml_schema = (
+ get_collections_galaxy_meta_info()
+ ) # type: list[dict[str, t.Any]] # FIXME: <--
+ # FIXME: 👆maybe precise type: list[dict[str, t.Union[bool, str, list[str]]]]
+
+ mandatory_keys = set()
+ string_keys = set() # type: set[str]
+ list_keys = set() # type: set[str]
+ dict_keys = set() # type: set[str]
+ sentinel_keys = set() # type: set[str]
+
+ for info in galaxy_yml_schema:
+ if info.get('required', False):
+ mandatory_keys.add(info['key'])
+
+ key_list_type = {
+ 'str': string_keys,
+ 'list': list_keys,
+ 'dict': dict_keys,
+ 'sentinel': sentinel_keys,
+ }[info.get('type', 'str')]
+ key_list_type.add(info['key'])
+
+ all_keys = frozenset(mandatory_keys | string_keys | list_keys | dict_keys | sentinel_keys)
+
+ set_keys = set(galaxy_yml.keys())
+ missing_keys = mandatory_keys.difference(set_keys)
+ if missing_keys:
+ msg = (
+ "The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
+ % (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys)))
+ )
+ if require_build_metadata:
+ raise AnsibleError(msg)
+ display.warning(msg)
+ raise ValueError(msg)
+
+ extra_keys = set_keys.difference(all_keys)
+    if extra_keys:
+ display.warning("Found unknown keys in collection galaxy.yml at '%s': %s"
+ % (to_text(b_galaxy_yml_path), ", ".join(extra_keys)))
+
+ # Add the defaults if they have not been set
+ for optional_string in string_keys:
+ if optional_string not in galaxy_yml:
+ galaxy_yml[optional_string] = None
+
+ for optional_list in list_keys:
+ list_val = galaxy_yml.get(optional_list, None)
+
+ if list_val is None:
+ galaxy_yml[optional_list] = []
+ elif not isinstance(list_val, list):
+ galaxy_yml[optional_list] = [list_val] # type: ignore[list-item]
+
+ for optional_dict in dict_keys:
+ if optional_dict not in galaxy_yml:
+ galaxy_yml[optional_dict] = {}
+
+ for optional_sentinel in sentinel_keys:
+ if optional_sentinel not in galaxy_yml:
+ galaxy_yml[optional_sentinel] = Sentinel
+
+ # NOTE: `version: null` is only allowed for `galaxy.yml`
+ # NOTE: and not `MANIFEST.json`. The use-case for it is collections
+ # NOTE: that generate the version from Git before building a
+ # NOTE: distributable tarball artifact.
+ if not galaxy_yml.get('version'):
+ galaxy_yml['version'] = '*'
+
+ return galaxy_yml
+
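+# Normalization sketch: given a minimal galaxy.yml mapping such as
+#
+#   {'namespace': 'ns', 'name': 'coll', 'version': None, 'readme': 'README.md',
+#    'authors': ['me']}
+#
+# missing optional string keys become None, list keys [], dict keys {},
+# sentinel keys Sentinel, and a falsy version is replaced with '*'.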
+
+def _get_meta_from_dir(
+ b_path, # type: bytes
+ require_build_metadata=True, # type: bool
+): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
+ try:
+ return _get_meta_from_installed_dir(b_path)
+ except LookupError:
+ return _get_meta_from_src_dir(b_path, require_build_metadata)
+
+
+def _get_meta_from_src_dir(
+ b_path, # type: bytes
+ require_build_metadata=True, # type: bool
+): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
+ galaxy_yml = os.path.join(b_path, _GALAXY_YAML)
+ if not os.path.isfile(galaxy_yml):
+ raise LookupError(
+ "The collection galaxy.yml path '{path!s}' does not exist.".
+ format(path=to_native(galaxy_yml))
+ )
+
+ with open(galaxy_yml, 'rb') as manifest_file_obj:
+ try:
+ manifest = yaml_load(manifest_file_obj)
+ except yaml.error.YAMLError as yaml_err:
+ raise_from(
+ AnsibleError(
+ "Failed to parse the galaxy.yml at '{path!s}' with "
+ 'the following error:\n{err_txt!s}'.
+ format(
+ path=to_native(galaxy_yml),
+ err_txt=to_native(yaml_err),
+ ),
+ ),
+ yaml_err,
+ )
+
+ if not isinstance(manifest, dict):
+ if require_build_metadata:
+ raise AnsibleError(f"The collection galaxy.yml at '{to_native(galaxy_yml)}' is incorrectly formatted.")
+ # Valid build metadata is not required by ansible-galaxy list. Raise ValueError to fall back to implicit metadata.
+ display.warning(f"The collection galaxy.yml at '{to_native(galaxy_yml)}' is incorrectly formatted.")
+ raise ValueError(f"The collection galaxy.yml at '{to_native(galaxy_yml)}' is incorrectly formatted.")
+
+ return _normalize_galaxy_yml_manifest(manifest, galaxy_yml, require_build_metadata)
+
+
+def _get_json_from_installed_dir(
+ b_path, # type: bytes
+ filename, # type: str
+): # type: (...) -> dict
+
+ b_json_filepath = os.path.join(b_path, to_bytes(filename, errors='surrogate_or_strict'))
+
+ try:
+ with open(b_json_filepath, 'rb') as manifest_fd:
+ b_json_text = manifest_fd.read()
+ except (IOError, OSError):
+ raise LookupError(
+ "The collection {manifest!s} path '{path!s}' does not exist.".
+ format(
+ manifest=filename,
+ path=to_native(b_json_filepath),
+ )
+ )
+
+ manifest_txt = to_text(b_json_text, errors='surrogate_or_strict')
+
+ try:
+ manifest = json.loads(manifest_txt)
+ except ValueError:
+ raise AnsibleError(
+            'Collection metadata file {member!s} does not '
+ 'contain a valid json string.'.
+ format(member=filename),
+ )
+
+ return manifest
+
+
+def _get_meta_from_installed_dir(
+ b_path, # type: bytes
+): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
+ manifest = _get_json_from_installed_dir(b_path, MANIFEST_FILENAME)
+ collection_info = manifest['collection_info']
+
+ version = collection_info.get('version')
+ if not version:
+ raise AnsibleError(
+ u'Collection metadata file `{manifest_filename!s}` at `{meta_file!s}` is expected '
+ u'to have a valid SemVer version value but got {version!s}'.
+ format(
+ manifest_filename=MANIFEST_FILENAME,
+ meta_file=to_text(b_path),
+ version=to_text(repr(version)),
+ ),
+ )
+
+ return collection_info
+
+
+def _get_meta_from_tar(
+ b_path, # type: bytes
+): # type: (...) -> dict[str, t.Union[str, list[str], dict[str, str], None, t.Type[Sentinel]]]
+ if not tarfile.is_tarfile(b_path):
+ raise AnsibleError(
+ "Collection artifact at '{path!s}' is not a valid tar file.".
+ format(path=to_native(b_path)),
+ )
+
+ with tarfile.open(b_path, mode='r') as collection_tar: # type: tarfile.TarFile
+ try:
+ member = collection_tar.getmember(MANIFEST_FILENAME)
+ except KeyError:
+ raise AnsibleError(
+ "Collection at '{path!s}' does not contain the "
+ 'required file {manifest_file!s}.'.
+ format(
+ path=to_native(b_path),
+ manifest_file=MANIFEST_FILENAME,
+ ),
+ )
+
+ with _tarfile_extract(collection_tar, member) as (_member, member_obj):
+ if member_obj is None:
+ raise AnsibleError(
+ 'Collection tar file does not contain '
+ 'member {member!s}'.format(member=MANIFEST_FILENAME),
+ )
+
+ text_content = to_text(
+ member_obj.read(),
+ errors='surrogate_or_strict',
+ )
+
+ try:
+ manifest = json.loads(text_content)
+ except ValueError:
+ raise AnsibleError(
+ 'Collection tar file member {member!s} does not '
+ 'contain a valid json string.'.
+ format(member=MANIFEST_FILENAME),
+ )
+ return manifest['collection_info']
+
+
+@contextmanager
+def _tarfile_extract(
+ tar, # type: tarfile.TarFile
+ member, # type: tarfile.TarInfo
+):
+ # type: (...) -> t.Iterator[tuple[tarfile.TarInfo, t.Optional[t.IO[bytes]]]]
+ tar_obj = tar.extractfile(member)
+ try:
+ yield member, tar_obj
+ finally:
+ if tar_obj is not None:
+ tar_obj.close()
diff --git a/lib/ansible/galaxy/collection/galaxy_api_proxy.py b/lib/ansible/galaxy/collection/galaxy_api_proxy.py
new file mode 100644
index 0000000..51e0c9f
--- /dev/null
+++ b/lib/ansible/galaxy/collection/galaxy_api_proxy.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""A facade for interfacing with multiple Galaxy instances."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import typing as t
+
+if t.TYPE_CHECKING:
+ from ansible.galaxy.api import CollectionVersionMetadata
+ from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+ )
+ from ansible.galaxy.dependency_resolution.dataclasses import (
+ Candidate, Requirement,
+ )
+
+from ansible.galaxy.api import GalaxyAPI, GalaxyError
+from ansible.module_utils._text import to_text
+from ansible.utils.display import Display
+
+
+display = Display()
+
+
+class MultiGalaxyAPIProxy:
+ """A proxy that abstracts talking to multiple Galaxy instances."""
+
+ def __init__(self, apis, concrete_artifacts_manager, offline=False):
+ # type: (t.Iterable[GalaxyAPI], ConcreteArtifactsManager, bool) -> None
+ """Initialize the target APIs list."""
+ self._apis = apis
+ self._concrete_art_mgr = concrete_artifacts_manager
+ self._offline = offline # Prevent all GalaxyAPI calls
+
+ @property
+ def is_offline_mode_requested(self):
+ return self._offline
+
+ def _assert_that_offline_mode_is_not_requested(self): # type: () -> None
+ if self.is_offline_mode_requested:
+ raise NotImplementedError("The calling code is not supposed to be invoked in 'offline' mode.")
+
+ def _get_collection_versions(self, requirement):
+ # type: (Requirement) -> t.Iterator[tuple[GalaxyAPI, str]]
+ """Helper for get_collection_versions.
+
+ Yield api, version pairs for all APIs,
+ and reraise the last error if no valid API was found.
+ """
+ if self._offline:
+            return
+
+ found_api = False
+ last_error = None # type: Exception | None
+
+ api_lookup_order = (
+ (requirement.src, )
+ if isinstance(requirement.src, GalaxyAPI)
+ else self._apis
+ )
+
+ for api in api_lookup_order:
+ try:
+ versions = api.get_collection_versions(requirement.namespace, requirement.name)
+ except GalaxyError as api_err:
+ last_error = api_err
+ except Exception as unknown_err:
+ display.warning(
+ "Skipping Galaxy server {server!s}. "
+ "Got an unexpected error when getting "
+ "available versions of collection {fqcn!s}: {err!s}".
+ format(
+ server=api.api_server,
+ fqcn=requirement.fqcn,
+ err=to_text(unknown_err),
+ )
+ )
+ last_error = unknown_err
+ else:
+ found_api = True
+ for version in versions:
+ yield api, version
+
+ if not found_api and last_error is not None:
+ raise last_error
+
+ def get_collection_versions(self, requirement):
+ # type: (Requirement) -> t.Iterable[tuple[str, GalaxyAPI]]
+ """Get a set of unique versions for FQCN on Galaxy servers."""
+ if requirement.is_concrete_artifact:
+ return {
+ (
+ self._concrete_art_mgr.
+ get_direct_collection_version(requirement),
+ requirement.src,
+ ),
+ }
+
+ return set(
+ (version, api)
+ for api, version in self._get_collection_versions(
+ requirement,
+ )
+ )
+
+ def get_collection_version_metadata(self, collection_candidate):
+ # type: (Candidate) -> CollectionVersionMetadata
+ """Retrieve collection metadata of a given candidate."""
+ self._assert_that_offline_mode_is_not_requested()
+
+ api_lookup_order = (
+ (collection_candidate.src, )
+ if isinstance(collection_candidate.src, GalaxyAPI)
+ else self._apis
+ )
+
+ last_err: t.Optional[Exception]
+
+ for api in api_lookup_order:
+ try:
+ version_metadata = api.get_collection_version_metadata(
+ collection_candidate.namespace,
+ collection_candidate.name,
+ collection_candidate.ver,
+ )
+ except GalaxyError as api_err:
+ last_err = api_err
+ except Exception as unknown_err:
+ # `verify` doesn't use `get_collection_versions` since the version is already known.
+ # Do the same as `install` and `download` by trying all APIs before failing.
+ # Warn for debugging purposes, since the Galaxy server may be unexpectedly down.
+ last_err = unknown_err
+ display.warning(
+ "Skipping Galaxy server {server!s}. "
+ "Got an unexpected error when getting "
+ "available versions of collection {fqcn!s}: {err!s}".
+ format(
+ server=api.api_server,
+ fqcn=collection_candidate.fqcn,
+ err=to_text(unknown_err),
+ )
+ )
+ else:
+ self._concrete_art_mgr.save_collection_source(
+ collection_candidate,
+ version_metadata.download_url,
+ version_metadata.artifact_sha256,
+ api.token,
+ version_metadata.signatures_url,
+ version_metadata.signatures,
+ )
+ return version_metadata
+
+ raise last_err
+
+ def get_collection_dependencies(self, collection_candidate):
+ # type: (Candidate) -> dict[str, str]
+ # FIXME: return Requirement instances instead?
+ """Retrieve collection dependencies of a given candidate."""
+ if collection_candidate.is_concrete_artifact:
+ return (
+ self.
+ _concrete_art_mgr.
+ get_direct_collection_dependencies
+ )(collection_candidate)
+
+ return (
+ self.
+ get_collection_version_metadata(collection_candidate).
+ dependencies
+ )
+
+ def get_signatures(self, collection_candidate):
+ # type: (Candidate) -> list[str]
+ self._assert_that_offline_mode_is_not_requested()
+ namespace = collection_candidate.namespace
+ name = collection_candidate.name
+ version = collection_candidate.ver
+ last_err = None # type: Exception | None
+
+ api_lookup_order = (
+ (collection_candidate.src, )
+ if isinstance(collection_candidate.src, GalaxyAPI)
+ else self._apis
+ )
+
+ for api in api_lookup_order:
+ try:
+ return api.get_collection_signatures(namespace, name, version)
+ except GalaxyError as api_err:
+ last_err = api_err
+ except Exception as unknown_err:
+ # Warn for debugging purposes, since the Galaxy server may be unexpectedly down.
+ last_err = unknown_err
+ display.warning(
+ "Skipping Galaxy server {server!s}. "
+ "Got an unexpected error when getting "
+ "available versions of collection {fqcn!s}: {err!s}".
+ format(
+ server=api.api_server,
+ fqcn=collection_candidate.fqcn,
+ err=to_text(unknown_err),
+ )
+ )
+ if last_err:
+ raise last_err
+
+ return []
diff --git a/lib/ansible/galaxy/collection/gpg.py b/lib/ansible/galaxy/collection/gpg.py
new file mode 100644
index 0000000..8641f0d
--- /dev/null
+++ b/lib/ansible/galaxy/collection/gpg.py
@@ -0,0 +1,282 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2022, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Signature verification helpers."""
+
+from ansible.errors import AnsibleError
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils.urls import open_url
+
+import contextlib
+import os
+import subprocess
+import sys
+import typing as t
+
+from dataclasses import dataclass, fields as dc_fields
+from functools import partial
+from urllib.error import HTTPError, URLError
+
+if t.TYPE_CHECKING:
+ from ansible.utils.display import Display
+
+IS_PY310_PLUS = sys.version_info[:2] >= (3, 10)
+
+frozen_dataclass = partial(dataclass, frozen=True, **({'slots': True} if IS_PY310_PLUS else {}))
+
+
+def get_signature_from_source(source, display=None): # type: (str, t.Optional[Display]) -> str
+ if display is not None:
+ display.vvvv(f"Using signature at {source}")
+ try:
+ with open_url(
+ source,
+ http_agent=user_agent(),
+ validate_certs=True,
+ follow_redirects='safe'
+ ) as resp:
+ signature = resp.read()
+ except (HTTPError, URLError) as e:
+ raise AnsibleError(
+ f"Failed to get signature for collection verification from '{source}': {e}"
+ ) from e
+
+ return signature
+
+
+def run_gpg_verify(
+ manifest_file, # type: str
+ signature, # type: str
+ keyring, # type: str
+ display, # type: Display
+): # type: (...) -> tuple[str, int]
+ status_fd_read, status_fd_write = os.pipe()
+
+ # running the gpg command will create the keyring if it does not exist
+ remove_keybox = not os.path.exists(keyring)
+
+ cmd = [
+ 'gpg',
+ f'--status-fd={status_fd_write}',
+ '--verify',
+ '--batch',
+ '--no-tty',
+ '--no-default-keyring',
+ f'--keyring={keyring}',
+ '-',
+ manifest_file,
+ ]
+ cmd_str = ' '.join(cmd)
+ display.vvvv(f"Running command '{cmd}'")
+
+ try:
+ p = subprocess.Popen(
+ cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ pass_fds=(status_fd_write,),
+ encoding='utf8',
+ )
+ except (FileNotFoundError, subprocess.SubprocessError) as err:
+ raise AnsibleError(
+ f"Failed during GnuPG verification with command '{cmd_str}': {err}"
+ ) from err
+ else:
+ stdout, stderr = p.communicate(input=signature)
+ finally:
+ os.close(status_fd_write)
+
+ if remove_keybox:
+ with contextlib.suppress(OSError):
+ os.remove(keyring)
+
+ with os.fdopen(status_fd_read) as f:
+ stdout = f.read()
+ display.vvvv(
+ f"stdout: \n{stdout}\nstderr: \n{stderr}\n(exit code {p.returncode})"
+ )
+ return stdout, p.returncode
+
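+# Call sketch (paths are illustrative; 'display' is an ansible Display):
+#
+#   status_out, rc = run_gpg_verify('/path/to/MANIFEST.json', detached_sig,
+#                                   '/path/to/keyring.kbx', display)
+#   # 'status_out' holds gpg's machine-readable --status-fd output, which
+#   # parse_gpg_errors() below turns into GpgBaseError instances.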
+
+def parse_gpg_errors(status_out): # type: (str) -> t.Iterator[GpgBaseError]
+ for line in status_out.splitlines():
+ if not line:
+ continue
+ try:
+ _dummy, status, remainder = line.split(maxsplit=2)
+ except ValueError:
+ _dummy, status = line.split(maxsplit=1)
+ remainder = None
+
+ try:
+ cls = GPG_ERROR_MAP[status]
+ except KeyError:
+ continue
+
+ fields = [status]
+ if remainder:
+ fields.extend(
+ remainder.split(
+ None,
+ len(dc_fields(cls)) - 2
+ )
+ )
+
+ yield cls(*fields)
+
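+# Example status-fd input and the resulting errors (values illustrative):
+#
+#   status = '[GNUPG:] NO_PUBKEY DEADBEEFDEADBEEF\n[GNUPG:] GOODSIG ...'
+#   errors = list(parse_gpg_errors(status))
+#   # -> [GpgNoPubkey(status='NO_PUBKEY', keyid='DEADBEEFDEADBEEF')]; lines
+#   #    whose status has no GPG_ERROR_MAP entry (e.g. GOODSIG) are skipped.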
+
+@frozen_dataclass
+class GpgBaseError(Exception):
+ status: str
+
+ @classmethod
+ def get_gpg_error_description(cls) -> str:
+ """Return the current class description."""
+ return ' '.join(cls.__doc__.split())
+
+ def __post_init__(self):
+ for field in dc_fields(self):
+ super(GpgBaseError, self).__setattr__(field.name, field.type(getattr(self, field.name)))
+
+
+@frozen_dataclass
+class GpgExpSig(GpgBaseError):
+ """The signature with the keyid is good, but the signature is expired."""
+ keyid: str
+ username: str
+
+
+@frozen_dataclass
+class GpgExpKeySig(GpgBaseError):
+ """The signature with the keyid is good, but the signature was made by an expired key."""
+ keyid: str
+ username: str
+
+
+@frozen_dataclass
+class GpgRevKeySig(GpgBaseError):
+ """The signature with the keyid is good, but the signature was made by a revoked key."""
+ keyid: str
+ username: str
+
+
+@frozen_dataclass
+class GpgBadSig(GpgBaseError):
+ """The signature with the keyid has not been verified okay."""
+ keyid: str
+ username: str
+
+
+@frozen_dataclass
+class GpgErrSig(GpgBaseError):
+ """"It was not possible to check the signature. This may be caused by
+ a missing public key or an unsupported algorithm. A RC of 4
+ indicates unknown algorithm, a 9 indicates a missing public
+ key.
+ """
+ keyid: str
+ pkalgo: int
+ hashalgo: int
+ sig_class: str
+ time: int
+ rc: int
+ fpr: str
+
+
+@frozen_dataclass
+class GpgNoPubkey(GpgBaseError):
+ """The public key is not available."""
+ keyid: str
+
+
+@frozen_dataclass
+class GpgMissingPassPhrase(GpgBaseError):
+ """No passphrase was supplied."""
+
+
+@frozen_dataclass
+class GpgBadPassphrase(GpgBaseError):
+ """The supplied passphrase was wrong or not given."""
+ keyid: str
+
+
+@frozen_dataclass
+class GpgNoData(GpgBaseError):
+ """No data has been found. Codes for WHAT are:
+ - 1 :: No armored data.
+ - 2 :: Expected a packet but did not find one.
+ - 3 :: Invalid packet found, this may indicate a non OpenPGP
+ message.
+ - 4 :: Signature expected but not found.
+ """
+ what: str
+
+
+@frozen_dataclass
+class GpgUnexpected(GpgBaseError):
+ """No data has been found. Codes for WHAT are:
+ - 1 :: No armored data.
+ - 2 :: Expected a packet but did not find one.
+ - 3 :: Invalid packet found, this may indicate a non OpenPGP
+ message.
+ - 4 :: Signature expected but not found.
+ """
+ what: str
+
+
+@frozen_dataclass
+class GpgError(GpgBaseError):
+ """This is a generic error status message, it might be followed by error location specific data."""
+ location: str
+ code: int
+ more: str = ""
+
+
+@frozen_dataclass
+class GpgFailure(GpgBaseError):
+ """This is the counterpart to SUCCESS and used to indicate a program failure."""
+ location: str
+ code: int
+
+
+@frozen_dataclass
+class GpgBadArmor(GpgBaseError):
+ """The ASCII armor is corrupted."""
+
+
+@frozen_dataclass
+class GpgKeyExpired(GpgBaseError):
+ """The key has expired."""
+ timestamp: int
+
+
+@frozen_dataclass
+class GpgKeyRevoked(GpgBaseError):
+ """The used key has been revoked by its owner."""
+
+
+@frozen_dataclass
+class GpgNoSecKey(GpgBaseError):
+ """The secret key is not available."""
+ keyid: str
+
+
+GPG_ERROR_MAP = {
+ 'EXPSIG': GpgExpSig,
+ 'EXPKEYSIG': GpgExpKeySig,
+ 'REVKEYSIG': GpgRevKeySig,
+ 'BADSIG': GpgBadSig,
+ 'ERRSIG': GpgErrSig,
+ 'NO_PUBKEY': GpgNoPubkey,
+ 'MISSING_PASSPHRASE': GpgMissingPassPhrase,
+ 'BAD_PASSPHRASE': GpgBadPassphrase,
+ 'NODATA': GpgNoData,
+ 'UNEXPECTED': GpgUnexpected,
+ 'ERROR': GpgError,
+ 'FAILURE': GpgFailure,
+ 'BADARMOR': GpgBadArmor,
+ 'KEYEXPIRED': GpgKeyExpired,
+ 'KEYREVOKED': GpgKeyRevoked,
+ 'NO_SECKEY': GpgNoSecKey,
+}
diff --git a/lib/ansible/galaxy/data/apb/Dockerfile.j2 b/lib/ansible/galaxy/data/apb/Dockerfile.j2
new file mode 100644
index 0000000..4d99a8b
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/Dockerfile.j2
@@ -0,0 +1,9 @@
+FROM ansibleplaybookbundle/apb-base
+
+LABEL "com.redhat.apb.spec"=\
+""
+
+COPY playbooks /opt/apb/actions
+COPY . /opt/ansible/roles/{{ role_name }}
+RUN chmod -R g=u /opt/{ansible,apb}
+USER apb
diff --git a/lib/ansible/galaxy/data/apb/Makefile.j2 b/lib/ansible/galaxy/data/apb/Makefile.j2
new file mode 100644
index 0000000..ebeaa61
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/Makefile.j2
@@ -0,0 +1,21 @@
+DOCKERHOST = DOCKERHOST
+DOCKERORG = DOCKERORG
+IMAGENAME = {{ role_name }}
+TAG = latest
+USER=$(shell id -u)
+PWD=$(shell pwd)
+build_and_push: apb_build docker_push apb_push
+
+.PHONY: apb_build
+apb_build:
+ docker run --rm --privileged -v $(PWD):/mnt:z -v $(HOME)/.kube:/.kube -v /var/run/docker.sock:/var/run/docker.sock -u $(USER) docker.io/ansibleplaybookbundle/apb-tools:latest prepare
+ docker build -t $(DOCKERHOST)/$(DOCKERORG)/$(IMAGENAME):$(TAG) .
+
+.PHONY: docker_push
+docker_push:
+ docker push $(DOCKERHOST)/$(DOCKERORG)/$(IMAGENAME):$(TAG)
+
+.PHONY: apb_push
+apb_push:
+ docker run --rm --privileged -v $(PWD):/mnt:z -v $(HOME)/.kube:/.kube -v /var/run/docker.sock:/var/run/docker.sock -u $(USER) docker.io/ansibleplaybookbundle/apb-tools:latest push
+
diff --git a/lib/ansible/galaxy/data/apb/README.md b/lib/ansible/galaxy/data/apb/README.md
new file mode 100644
index 0000000..2e350a0
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/README.md
@@ -0,0 +1,38 @@
+APB Name
+=========
+
+A brief description of the APB goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+APB Variables
+--------------
+
+A description of the settable variables for this APB should go here, including any variables that are in defaults/main.yml, vars/main.yml, apb.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other APBs/roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your APB (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/lib/ansible/galaxy/data/apb/apb.yml.j2 b/lib/ansible/galaxy/data/apb/apb.yml.j2
new file mode 100644
index 0000000..f968801
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/apb.yml.j2
@@ -0,0 +1,13 @@
+version: '1.0.0'
+name: {{ role_name }}
+description: {{ description }}
+bindable: False
+async: optional
+metadata:
+ displayName: {{ role_name }}
+plans:
+ - name: default
+ description: This default plan deploys {{ role_name }}
+ free: True
+ metadata: {}
+ parameters: []
diff --git a/lib/ansible/galaxy/data/apb/defaults/main.yml.j2 b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2
new file mode 100644
index 0000000..3818e64
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/files/.git_keep b/lib/ansible/galaxy/data/apb/files/.git_keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/files/.git_keep
diff --git a/lib/ansible/galaxy/data/apb/handlers/main.yml.j2 b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2
new file mode 100644
index 0000000..3f4c496
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/meta/main.yml.j2 b/lib/ansible/galaxy/data/apb/meta/main.yml.j2
new file mode 100644
index 0000000..862f8ef
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/meta/main.yml.j2
@@ -0,0 +1,44 @@
+galaxy_info:
+ author: {{ author }}
+ description: {{ description }}
+ company: {{ company }}
+
+ # If the issue tracker for your role is not on GitHub, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: {{ issue_tracker_url }}
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: {{ license }}
+
+ #
+ # platforms is a list of platforms, and each platform has a name and a list of versions.
+ #
+ # platforms:
+ # - name: Fedora
+ # versions:
+ # - all
+ # - 25
+ # - name: SomePlatform
+ # versions:
+ # - all
+ # - 1.0
+ # - 7
+ # - 99.99
+
+ galaxy_tags:
+ - apb
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+ # if you add dependencies to this list.
diff --git a/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2
new file mode 100644
index 0000000..1952731
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2
@@ -0,0 +1,8 @@
+- name: "{{ role_name }} playbook to deprovision the application"
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ vars:
+ apb_action: deprovision
+ roles:
+ - role: {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2
new file mode 100644
index 0000000..7b08605
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2
@@ -0,0 +1,8 @@
+- name: "{{ role_name }} playbook to provision the application"
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ vars:
+ apb_action: provision
+ roles:
+ - role: {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/tasks/main.yml.j2 b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2
new file mode 100644
index 0000000..a988065
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/templates/.git_keep b/lib/ansible/galaxy/data/apb/templates/.git_keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/templates/.git_keep
diff --git a/lib/ansible/galaxy/data/apb/tests/ansible.cfg b/lib/ansible/galaxy/data/apb/tests/ansible.cfg
new file mode 100644
index 0000000..2f74f1b
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/tests/ansible.cfg
@@ -0,0 +1,2 @@
+[defaults]
+inventory=./inventory
diff --git a/lib/ansible/galaxy/data/apb/tests/inventory b/lib/ansible/galaxy/data/apb/tests/inventory
new file mode 100644
index 0000000..ea69cbf
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/tests/inventory
@@ -0,0 +1,3 @@
+localhost
+
+
diff --git a/lib/ansible/galaxy/data/apb/tests/test.yml.j2 b/lib/ansible/galaxy/data/apb/tests/test.yml.j2
new file mode 100644
index 0000000..fb14f85
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/tests/test.yml.j2
@@ -0,0 +1,7 @@
+---
+- hosts: localhost
+ gather_facts: no
+ connection: local
+ tasks:
+
+ # Add tasks and assertions for testing the service here.
diff --git a/lib/ansible/galaxy/data/apb/vars/main.yml.j2 b/lib/ansible/galaxy/data/apb/vars/main.yml.j2
new file mode 100644
index 0000000..092d511
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/vars/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/collections_galaxy_meta.yml b/lib/ansible/galaxy/data/collections_galaxy_meta.yml
new file mode 100644
index 0000000..5c4472c
--- /dev/null
+++ b/lib/ansible/galaxy/data/collections_galaxy_meta.yml
@@ -0,0 +1,120 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# key: The name of the key as defined in galaxy.yml
+# description: Comment/info on the key to be used as the generated doc and auto generated skeleton galaxy.yml file
+# required: Whether the key is required (default is no)
+# type: The type of value that can be set, aligns to the values in the plugin formatter
+---
+- key: namespace
+ description:
+ - The namespace of the collection.
+ - This can be a company/brand/organization or product namespace under which all content lives.
+ - May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with underscores or
+ numbers and cannot contain consecutive underscores.
+ required: yes
+ type: str
+
+- key: name
+ description:
+ - The name of the collection.
+ - Has the same character restrictions as C(namespace).
+ required: yes
+ type: str
+
+- key: version
+ description:
+ - The version of the collection.
+ - Must be compatible with semantic versioning.
+ required: yes
+ type: str
+
+- key: readme
+ description:
+ - The path to the Markdown (.md) readme file.
+ - This path is relative to the root of the collection.
+ required: yes
+ type: str
+
+- key: authors
+ description:
+ - A list of the collection's content authors.
+ - Can be just the name or in the format 'Full Name <email> (url) @nicks:irc/im.site#channel'.
+ required: yes
+ type: list
+
+- key: description
+ description:
+ - A short summary description of the collection.
+ type: str
+
+- key: license
+ description:
+ - Either a single license or a list of licenses for content inside of a collection.
+ - Ansible Galaxy currently only accepts L(SPDX,https://spdx.org/licenses/) licenses.
+ - This key is mutually exclusive with C(license_file).
+ type: list
+
+- key: license_file
+ description:
+ - The path to the license file for the collection.
+ - This path is relative to the root of the collection.
+ - This key is mutually exclusive with C(license).
+ type: str
+
+- key: tags
+ description:
+ - A list of tags you want to associate with the collection for indexing/searching.
+ - A tag name has the same character requirements as C(namespace) and C(name).
+ type: list
+
+- key: dependencies
+ description:
+ - Collections that this collection requires to be installed for it to be usable.
+ - The key of the dict is the collection label C(namespace.name).
+ - The value is a version range
+ L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification).
+ - Multiple version range specifiers can be set and are separated by C(,).
+ type: dict
+
+- key: repository
+ description:
+ - The URL of the originating SCM repository.
+ type: str
+
+- key: documentation
+ description:
+ - The URL to any online docs.
+ type: str
+
+- key: homepage
+ description:
+ - The URL to the homepage of the collection/project.
+ type: str
+
+- key: issues
+ description:
+ - The URL to the collection issue tracker.
+ type: str
+
+- key: build_ignore
+ description:
+ - A list of file glob-like patterns used to filter any files or directories
+ that should not be included in the build artifact.
+ - A pattern is matched from the relative path of the file or directory of the
+ collection directory.
+ - This uses C(fnmatch) to match the files or directories.
+ - Some directories and files like C(galaxy.yml), C(*.pyc), C(*.retry), and
+ C(.git) are always filtered.
+ - Mutually exclusive with C(manifest).
+ type: list
+ version_added: '2.10'
+
+- key: manifest
+ description:
+ - A dict controlling use of manifest directives used in building the collection artifact.
+ - The key C(directives) is a list of MANIFEST.in style L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands).
+ - The key C(omit_default_directives) is a boolean that controls whether the default directives are used.
+ - Mutually exclusive with C(build_ignore).
+ type: sentinel
+ version_added: '2.14'
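The schema above is what `ansible-galaxy` uses to document and generate a collection's galaxy.yml. As a minimal sketch, assuming invented namespace, name, and author values, the following checks a sample document against the `required: yes` keys listed above and shows the comma-separated dependency specifier syntax:

```
# Illustrative only: sample galaxy.yml content with invented values, checked
# against the required keys documented in collections_galaxy_meta.yml above.
import yaml

SAMPLE_GALAXY_YML = """
namespace: my_namespace
name: my_collection
version: 1.0.0
readme: README.md
authors:
  - Jane Doe <jane@example.com>
dependencies:
  # Multiple version range specifiers are separated by ','.
  other_ns.other_col: '>=1.0.0,<2.0.0'
"""

REQUIRED_KEYS = {'namespace', 'name', 'version', 'readme', 'authors'}

meta = yaml.safe_load(SAMPLE_GALAXY_YML)
missing = REQUIRED_KEYS - meta.keys()
assert not missing, f'missing required galaxy.yml keys: {missing}'
```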
diff --git a/lib/ansible/galaxy/data/container/README.md b/lib/ansible/galaxy/data/container/README.md
new file mode 100644
index 0000000..1b66bdb
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/README.md
@@ -0,0 +1,49 @@
+# Role Name
+
+Adds a <SERVICE_NAME> service to your [Ansible Container](https://github.com/ansible/ansible-container) project. Run the following commands
+to install the service:
+
+```
+# Set the working directory to your Ansible Container project root
+$ cd myproject
+
+# Install the service
+$ ansible-container install <USERNAME.ROLE_NAME>
+```
+
+## Requirements
+
+- [Ansible Container](https://github.com/ansible/ansible-container)
+- An existing Ansible Container project. To create a project, simply run the following:
+ ```
+ # Create an empty project directory
+ $ mkdir myproject
+
+ # Set the working directory to the new directory
+ $ cd myproject
+
+ # Initialize the project
+ $ ansible-container init
+ ```
+
+- Continue listing any prerequisites here...
+
+
+## Role Variables
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set
+via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
+
+## Dependencies
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+## License
+
+BSD
+
+## Author Information
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
+
+
diff --git a/lib/ansible/galaxy/data/container/defaults/main.yml.j2 b/lib/ansible/galaxy/data/container/defaults/main.yml.j2
new file mode 100644
index 0000000..3818e64
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/defaults/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/files/.git_keep b/lib/ansible/galaxy/data/container/files/.git_keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/files/.git_keep
diff --git a/lib/ansible/galaxy/data/container/handlers/main.yml.j2 b/lib/ansible/galaxy/data/container/handlers/main.yml.j2
new file mode 100644
index 0000000..3f4c496
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/handlers/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/meta/container.yml.j2 b/lib/ansible/galaxy/data/container/meta/container.yml.j2
new file mode 100644
index 0000000..f033d34
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/meta/container.yml.j2
@@ -0,0 +1,11 @@
+# Add your Ansible Container service definitions here.
+# For example:
+#
+# web:
+#   image: ubuntu:trusty
+#   ports:
+#     - "80:80"
+#   command: ['/usr/bin/dumb-init', '/usr/sbin/apache2ctl', '-D', 'FOREGROUND']
+#   dev_overrides:
+#     environment:
+#       - "DEBUG=1"
diff --git a/lib/ansible/galaxy/data/container/meta/main.yml.j2 b/lib/ansible/galaxy/data/container/meta/main.yml.j2
new file mode 100644
index 0000000..72fc9a2
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/meta/main.yml.j2
@@ -0,0 +1,52 @@
+galaxy_info:
+ author: {{ author }}
+ description: {{ description }}
+ company: {{ company }}
+
+ # If the issue tracker for your role is not on GitHub, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: {{ issue_tracker_url }}
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: {{ license }}
+
+ min_ansible_container_version: 0.2.0
+
+ # If Ansible is required outside of the build container, provide the minimum version:
+ # min_ansible_version:
+
+ #
+ # Provide a list of supported platforms, and for each platform a list of versions.
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'.
+ # To view available platforms and versions (or releases), visit:
+ # https://galaxy.ansible.com/api/v1/platforms/
+ #
+ # platforms:
+ # - name: Fedora
+ # versions:
+ # - all
+ # - 25
+ # - name: SomePlatform
+ # versions:
+ # - all
+ # - 1.0
+ # - 7
+ # - 99.99
+
+ galaxy_tags:
+ - container
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+ # if you add dependencies to this list.
diff --git a/lib/ansible/galaxy/data/container/tasks/main.yml.j2 b/lib/ansible/galaxy/data/container/tasks/main.yml.j2
new file mode 100644
index 0000000..a988065
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/tasks/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/templates/.git_keep b/lib/ansible/galaxy/data/container/templates/.git_keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/templates/.git_keep
diff --git a/lib/ansible/galaxy/data/container/tests/ansible.cfg b/lib/ansible/galaxy/data/container/tests/ansible.cfg
new file mode 100644
index 0000000..2f74f1b
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/tests/ansible.cfg
@@ -0,0 +1,2 @@
+[defaults]
+inventory=./inventory
diff --git a/lib/ansible/galaxy/data/container/tests/inventory b/lib/ansible/galaxy/data/container/tests/inventory
new file mode 100644
index 0000000..ea69cbf
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/tests/inventory
@@ -0,0 +1,3 @@
+localhost
+
+
diff --git a/lib/ansible/galaxy/data/container/tests/test.yml.j2 b/lib/ansible/galaxy/data/container/tests/test.yml.j2
new file mode 100644
index 0000000..fb14f85
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/tests/test.yml.j2
@@ -0,0 +1,7 @@
+---
+- hosts: localhost
+ gather_facts: no
+ connection: local
+ tasks:
+
+ # Add tasks and assertions for testing the service here.
diff --git a/lib/ansible/galaxy/data/container/vars/main.yml.j2 b/lib/ansible/galaxy/data/container/vars/main.yml.j2
new file mode 100644
index 0000000..092d511
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/vars/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/collection/README.md.j2 b/lib/ansible/galaxy/data/default/collection/README.md.j2
new file mode 100644
index 0000000..5e51622
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/collection/README.md.j2
@@ -0,0 +1,3 @@
+# Ansible Collection - {{ namespace }}.{{ collection_name }}
+
+Documentation for the collection.
diff --git a/lib/ansible/galaxy/data/default/collection/docs/.git_keep b/lib/ansible/galaxy/data/default/collection/docs/.git_keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/collection/docs/.git_keep
diff --git a/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2
new file mode 100644
index 0000000..7821491
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2
@@ -0,0 +1,16 @@
+### REQUIRED
+{% for option in required_config %}
+{{ option.description | comment_ify }}
+{{ {option.key: option.value} | to_nice_yaml }}
+{% endfor %}
+
+### OPTIONAL but strongly recommended
+{% for option in optional_config %}
+{{ option.description | comment_ify }}
+{% if option.key == 'manifest' %}
+{{ {option.key: option.value} | to_nice_yaml | comment_ify }}
+
+{% else %}
+{{ {option.key: option.value} | to_nice_yaml }}
+{% endif %}
+{% endfor %}
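`comment_ify` and `to_nice_yaml` are custom Jinja2 filters registered by the `ansible-galaxy init` code that renders this skeleton; they are not built into Jinja2. A rough sketch of the behaviour the template depends on (not the shipped implementation) for `comment_ify`:

```
# Rough approximation of a comment_ify filter: turn a key's description
# (a string or a list of strings) into '#'-prefixed, wrapped comment lines.
import textwrap


def comment_ify(value):
    text = ' '.join(value) if isinstance(value, list) else value
    return textwrap.fill(
        text, width=117,
        initial_indent='# ', subsequent_indent='# ',
    )


print(comment_ify(['The namespace of the collection.']))
```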
diff --git a/lib/ansible/galaxy/data/default/collection/meta/runtime.yml b/lib/ansible/galaxy/data/default/collection/meta/runtime.yml
new file mode 100644
index 0000000..20f709e
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/collection/meta/runtime.yml
@@ -0,0 +1,52 @@
+---
+# Collections must specify a minimum required ansible version to upload
+# to galaxy
+# requires_ansible: '>=2.9.10'
+
+# Content that Ansible needs to load from another location or that has
+# been deprecated/removed
+# plugin_routing:
+# action:
+# redirected_plugin_name:
+# redirect: ns.col.new_location
+# deprecated_plugin_name:
+# deprecation:
+# removal_version: "4.0.0"
+# warning_text: |
+# See the porting guide on how to update your playbook to
+# use ns.col.another_plugin instead.
+# removed_plugin_name:
+# tombstone:
+# removal_version: "2.0.0"
+# warning_text: |
+# See the porting guide on how to update your playbook to
+# use ns.col.another_plugin instead.
+# become:
+# cache:
+# callback:
+# cliconf:
+# connection:
+# doc_fragments:
+# filter:
+# httpapi:
+# inventory:
+# lookup:
+# module_utils:
+# modules:
+# netconf:
+# shell:
+# strategy:
+# terminal:
+# test:
+# vars:
+
+# Python import statements that Ansible needs to load from another location
+# import_redirection:
+# ansible_collections.ns.col.plugins.module_utils.old_location:
+# redirect: ansible_collections.ns.col.plugins.module_utils.new_location
+
+# Groups of actions/modules that take a common set of options
+# action_groups:
+# group_name:
+# - module1
+# - module2
diff --git a/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2
new file mode 100644
index 0000000..7c006cf
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2
@@ -0,0 +1,31 @@
+# Collections Plugins Directory
+
+This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that
+is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that
+would contain module utils and modules respectively.
+
+Here is an example directory of the majority of plugins currently supported by Ansible:
+
+```
+└── plugins
+ ├── action
+ ├── become
+ ├── cache
+ ├── callback
+ ├── cliconf
+ ├── connection
+ ├── filter
+ ├── httpapi
+ ├── inventory
+ ├── lookup
+ ├── module_utils
+ ├── modules
+ ├── netconf
+ ├── shell
+ ├── strategy
+ ├── terminal
+ ├── test
+ └── vars
+```
+
+A full list of plugin types can be found at [Working With Plugins]({{ ansible_plugin_list_dir }}).
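For instance, a filter plugin is one of the lighter entries in this tree. A minimal, self-contained example of what a file such as `plugins/filter/reverse_upper.py` (a hypothetical name) could contain, using Ansible's standard FilterModule convention:

```
# Hypothetical plugins/filter/reverse_upper.py inside a collection.
# Ansible discovers filter plugins through a FilterModule class whose
# filters() method returns a mapping of filter names to callables.


def reverse_upper(value):
    """Reverse a string and upper-case it."""
    return value[::-1].upper()


class FilterModule(object):
    def filters(self):
        return {'reverse_upper': reverse_upper}
```

In a playbook the filter would then be addressed by its fully qualified name, e.g. `{{ 'abc' | ns.col.reverse_upper }}`.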
diff --git a/lib/ansible/galaxy/data/default/collection/roles/.git_keep b/lib/ansible/galaxy/data/default/collection/roles/.git_keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/collection/roles/.git_keep
diff --git a/lib/ansible/galaxy/data/default/role/README.md b/lib/ansible/galaxy/data/default/role/README.md
new file mode 100644
index 0000000..225dd44
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any prerequisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2
new file mode 100644
index 0000000..3818e64
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/files/.git_keep b/lib/ansible/galaxy/data/default/role/files/.git_keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/files/.git_keep
diff --git a/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2
new file mode 100644
index 0000000..3f4c496
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/meta/main.yml.j2 b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2
new file mode 100644
index 0000000..4891a68
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2
@@ -0,0 +1,55 @@
+galaxy_info:
+ author: {{ author }}
+ description: {{ description }}
+ company: {{ company }}
+
+ # If the issue tracker for your role is not on GitHub, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: {{ issue_tracker_url }}
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: {{ license }}
+
+ min_ansible_version: {{ min_ansible_version }}
+
+ # If this is a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ #
+ # Provide a list of supported platforms, and for each platform a list of versions.
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'.
+ # To view available platforms and versions (or releases), visit:
+ # https://galaxy.ansible.com/api/v1/platforms/
+ #
+ # platforms:
+ # - name: Fedora
+ # versions:
+ # - all
+ # - 25
+ # - name: SomePlatform
+ # versions:
+ # - all
+ # - 1.0
+ # - 7
+ # - 99.99
+
+ galaxy_tags: []
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags. Be sure to
+ # remove the '[]' above, if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+ # if you add dependencies to this list.
+{% for dependency in dependencies %}
+ #- {{ dependency }}
+{%- endfor %}
diff --git a/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2
new file mode 100644
index 0000000..a988065
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/templates/.git_keep b/lib/ansible/galaxy/data/default/role/templates/.git_keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/templates/.git_keep
diff --git a/lib/ansible/galaxy/data/default/role/tests/inventory b/lib/ansible/galaxy/data/default/role/tests/inventory
new file mode 100644
index 0000000..878877b
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/lib/ansible/galaxy/data/default/role/tests/test.yml.j2 b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2
new file mode 100644
index 0000000..0c40f95
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/vars/main.yml.j2 b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2
new file mode 100644
index 0000000..092d511
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/README.md b/lib/ansible/galaxy/data/network/README.md
new file mode 100644
index 0000000..84533c6
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any prerequisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses any vendor-specific SDKs or modules with specific dependencies, it may be a good idea to mention in this section that the package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2
new file mode 100644
index 0000000..02f234a
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2
@@ -0,0 +1,40 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+try:
+ from ansible.plugins.cliconf import CliconfBase
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/junos.py
+ """
+except ImportError:
+ raise AnsibleError("Cliconf Plugin [ {{ role_name }} ]: Dependency not satisfied")
+
+
+class Cliconf(CliconfBase):
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/junos.py
+ """
+ raise AnsibleError("Cliconf Plugin [ {{ role_name }} ]: Not implemented")
diff --git a/lib/ansible/galaxy/data/network/defaults/main.yml.j2 b/lib/ansible/galaxy/data/network/defaults/main.yml.j2
new file mode 100644
index 0000000..3818e64
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/defaults/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/files/.git_keep b/lib/ansible/galaxy/data/network/files/.git_keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/files/.git_keep
diff --git a/lib/ansible/galaxy/data/network/library/example_command.py.j2 b/lib/ansible/galaxy/data/network/library/example_command.py.j2
new file mode 100644
index 0000000..0f3dac2
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/library/example_command.py.j2
@@ -0,0 +1,66 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+
+### Documentation
+DOCUMENTATION = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
+"""
+
+EXAMPLES = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
+"""
+
+
+RETURN = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
+"""
+
+### Imports
+try:
+ from ansible.module_utils.basic import AnsibleModule
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
+ """
+except ImportError:
+ raise AnsibleError("[ {{ role_name }}_command ]: Dependency not satisfied")
+
+### Implementation
+def main():
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
+ """
+ raise AnsibleError(" [ {{ role_name }}_command ]: Not Implemented")
+
+### Entrypoint
+if __name__ == '__main__':
+ main()
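The linked iosxr/junos modules are the authoritative references; as a hedged sketch of the AnsibleModule shape this placeholder stands in for (the option name is invented for illustration):

```
# Minimal sketch of the AnsibleModule pattern the placeholder above defers
# to; the 'commands' option is illustrative, not part of the skeleton.
from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            commands=dict(type='list', elements='str', required=True),
        ),
        supports_check_mode=True,
    )
    # A real *_command module would run module.params['commands'] against
    # the device over the persistent connection and gather the responses.
    module.exit_json(changed=False, stdout=[])


if __name__ == '__main__':
    main()
```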
diff --git a/lib/ansible/galaxy/data/network/library/example_config.py.j2 b/lib/ansible/galaxy/data/network/library/example_config.py.j2
new file mode 100644
index 0000000..2c2c72b
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/library/example_config.py.j2
@@ -0,0 +1,66 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+
+### Documentation
+DOCUMENTATION = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
+"""
+
+EXAMPLES = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
+"""
+
+
+RETURN = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
+"""
+
+### Imports
+try:
+ from ansible.module_utils.basic import AnsibleModule
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
+ """
+except ImportError:
+ raise AnsibleError("[ {{ role_name }}_config ]: Dependency not satisfied")
+
+### Implementation
+def main():
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
+ """
+ raise AnsibleError(" [ {{ role_name }}_config ]: Not Implemented")
+
+### Entrypoint
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/galaxy/data/network/library/example_facts.py.j2 b/lib/ansible/galaxy/data/network/library/example_facts.py.j2
new file mode 100644
index 0000000..9f7608c
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/library/example_facts.py.j2
@@ -0,0 +1,66 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+
+### Documentation
+DOCUMENTATION = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+"""
+
+EXAMPLES = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+"""
+
+
+RETURN = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+"""
+
+### Imports
+try:
+ from ansible.module_utils.basic import AnsibleModule
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+ """
+except ImportError:
+ raise AnsibleError("[ {{ role_name }}_facts ]: Dependency not satisfied")
+
+### Implementation
+def main():
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+ """
+ raise AnsibleError(" [ {{ role_name }}_facts ]: Not Implemented")
+
+### Entrypoint
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/galaxy/data/network/meta/main.yml.j2 b/lib/ansible/galaxy/data/network/meta/main.yml.j2
new file mode 100644
index 0000000..d0184ae
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/meta/main.yml.j2
@@ -0,0 +1,52 @@
+galaxy_info:
+ author: {{ author }}
+ description: {{ description }}
+ company: {{ company }}
+
+ # If the issue tracker for your role is not on GitHub, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: {{ issue_tracker_url }}
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: {{ license }}
+
+ min_ansible_version: {{ min_ansible_version }}
+
+ # If this is a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ #
+ # platforms is a list of platforms, and each platform has a name and a list of versions.
+ #
+ # platforms:
+ # - name: VYOS
+ # versions:
+ # - all
+ # - 25
+ # - name: SomePlatform
+ # versions:
+ # - all
+ # - 1.0
+ # - 7
+ # - 99.99
+
+ galaxy_tags: []
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags. Be sure to
+ # remove the '[]' above, if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+ # if you add dependencies to this list.
+{%- for dependency in dependencies %}
+ #- {{ dependency }}
+{%- endfor %}
diff --git a/lib/ansible/galaxy/data/network/module_utils/example.py.j2 b/lib/ansible/galaxy/data/network/module_utils/example.py.j2
new file mode 100644
index 0000000..9bf2d3f
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/module_utils/example.py.j2
@@ -0,0 +1,40 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+### Imports
+try:
+ from ansible.module_utils.basic import env_fallback, return_values
+ from ansible.module_utils.connection import Connection
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/iosxr/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/junos/junos.py
+ """
+except ImportError:
+ raise AnsibleError("Netconf Plugin [ {{ role_name }} ]: Dependency not satisfied")
+
+### Implementation
+"""
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/iosxr/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/junos/junos.py
+"""
diff --git a/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
new file mode 100644
index 0000000..e3a1ce6
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
@@ -0,0 +1,40 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+try:
+ from ansible.plugins.netconf import NetconfBase
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/junos.py
+ """
+except ImportError:
+ raise AnsibleError("Netconf Plugin [ {{ role_name }} ]: Dependency not satisfied")
+
+
+class Netconf(NetconfBase):
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/junos.py
+ """
+ raise AnsibleError("Netconf Plugin [ {{ role_name }} ]: Not implemented")
diff --git a/lib/ansible/galaxy/data/network/tasks/main.yml.j2 b/lib/ansible/galaxy/data/network/tasks/main.yml.j2
new file mode 100644
index 0000000..a988065
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/tasks/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/templates/.git_keep b/lib/ansible/galaxy/data/network/templates/.git_keep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/templates/.git_keep
diff --git a/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
new file mode 100644
index 0000000..621a140
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
@@ -0,0 +1,40 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+try:
+ from ansible.plugins.terminal import TerminalBase
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/junos.py
+ """
+except ImportError:
+ raise AnsibleError("Terminal Plugin [ {{ role_name }} ]: Dependency not satisfied")
+
+
+class TerminalModule(TerminalBase):
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/junos.py
+ """
+ raise AnsibleError("Terminal Plugin [ {{ role_name }} ]: Not implemented")
diff --git a/lib/ansible/galaxy/data/network/tests/inventory b/lib/ansible/galaxy/data/network/tests/inventory
new file mode 100644
index 0000000..878877b
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/lib/ansible/galaxy/data/network/tests/test.yml.j2 b/lib/ansible/galaxy/data/network/tests/test.yml.j2
new file mode 100644
index 0000000..11284eb
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/tests/test.yml.j2
@@ -0,0 +1,14 @@
+---
+- hosts: localhost
+ connection: network_cli
+ gather_facts: False
+
+ roles:
+ - {{ role_name }}
+
+- hosts: localhost
+ connection: netconf
+ gather_facts: False
+
+ roles:
+ - {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/vars/main.yml.j2 b/lib/ansible/galaxy/data/network/vars/main.yml.j2
new file mode 100644
index 0000000..092d511
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/vars/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/dependency_resolution/__init__.py b/lib/ansible/galaxy/dependency_resolution/__init__.py
new file mode 100644
index 0000000..cfde7df
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/__init__.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Dependency resolution machinery."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import typing as t
+
+if t.TYPE_CHECKING:
+ from ansible.galaxy.api import GalaxyAPI
+ from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+ )
+ from ansible.galaxy.dependency_resolution.dataclasses import (
+ Candidate,
+ Requirement,
+ )
+
+from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
+from ansible.galaxy.dependency_resolution.providers import CollectionDependencyProvider
+from ansible.galaxy.dependency_resolution.reporters import CollectionDependencyReporter
+from ansible.galaxy.dependency_resolution.resolvers import CollectionDependencyResolver
+
+
+def build_collection_dependency_resolver(
+ galaxy_apis, # type: t.Iterable[GalaxyAPI]
+ concrete_artifacts_manager, # type: ConcreteArtifactsManager
+ user_requirements, # type: t.Iterable[Requirement]
+ preferred_candidates=None, # type: t.Iterable[Candidate]
+ with_deps=True, # type: bool
+ with_pre_releases=False, # type: bool
+ upgrade=False, # type: bool
+ include_signatures=True, # type: bool
+ offline=False, # type: bool
+): # type: (...) -> CollectionDependencyResolver
+ """Return a collection dependency resolver.
+
+ The returned instance will have a ``resolve()`` method for
+ further consumption.
+ """
+ return CollectionDependencyResolver(
+ CollectionDependencyProvider(
+ apis=MultiGalaxyAPIProxy(galaxy_apis, concrete_artifacts_manager, offline=offline),
+ concrete_artifacts_manager=concrete_artifacts_manager,
+ user_requirements=user_requirements,
+ preferred_candidates=preferred_candidates,
+ with_deps=with_deps,
+ with_pre_releases=with_pre_releases,
+ upgrade=upgrade,
+ include_signatures=include_signatures,
+ ),
+ CollectionDependencyReporter(),
+ )
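A rough sketch of how this factory is driven, assuming a ConcreteArtifactsManager has already been constructed elsewhere (its setup is elided here) and using Requirement.from_string() from the dataclasses module below; the server URL and collection name are illustrative:

```
# Sketch only: wiring build_collection_dependency_resolver() together.
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.dependency_resolution import (
    build_collection_dependency_resolver,
)
from ansible.galaxy.dependency_resolution.dataclasses import Requirement


def resolve_one(artifacts_manager):
    apis = [GalaxyAPI(Galaxy(), 'galaxy', 'https://galaxy.ansible.com')]
    requirements = [
        # 'namespace.name:version-spec'; the third argument is the set of
        # supplemental signatures (none supplied here).
        Requirement.from_string(
            'community.general:>=6.0.0', artifacts_manager, None,
        ),
    ]
    resolver = build_collection_dependency_resolver(
        apis, artifacts_manager, requirements,
    )
    # resolve() is inherited from resolvelib's Resolver; the result's
    # .mapping attribute maps collection names to pinned candidates.
    return resolver.resolve(requirements)
```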
diff --git a/lib/ansible/galaxy/dependency_resolution/dataclasses.py b/lib/ansible/galaxy/dependency_resolution/dataclasses.py
new file mode 100644
index 0000000..16fd631
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/dataclasses.py
@@ -0,0 +1,573 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Dependency structs."""
+# FIXME: add caching all over the place
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import typing as t
+
+from collections import namedtuple
+from collections.abc import MutableSequence, MutableMapping
+from glob import iglob
+from urllib.parse import urlparse
+from yaml import safe_load
+
+if t.TYPE_CHECKING:
+ from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+ )
+ Collection = t.TypeVar(
+ 'Collection',
+ 'Candidate', 'Requirement',
+ '_ComputedReqKindsMixin',
+ )
+
+
+from ansible.errors import AnsibleError
+from ansible.galaxy.api import GalaxyAPI
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.display import Display
+
+
+_ALLOW_CONCRETE_POINTER_IN_SOURCE = False # NOTE: This is a feature flag
+_GALAXY_YAML = b'galaxy.yml'
+_MANIFEST_JSON = b'MANIFEST.json'
+_SOURCE_METADATA_FILE = b'GALAXY.yml'
+
+display = Display()
+
+
+def get_validated_source_info(b_source_info_path, namespace, name, version):
+ source_info_path = to_text(b_source_info_path, errors='surrogate_or_strict')
+
+ if not os.path.isfile(b_source_info_path):
+ return None
+
+ try:
+ with open(b_source_info_path, mode='rb') as fd:
+ metadata = safe_load(fd)
+ except OSError as e:
+ display.warning(
+ f"Error getting collection source information at '{source_info_path}': {to_text(e, errors='surrogate_or_strict')}"
+ )
+ return None
+
+ if not isinstance(metadata, MutableMapping):
+ display.warning(f"Error getting collection source information at '{source_info_path}': expected a YAML dictionary")
+ return None
+
+ schema_errors = _validate_v1_source_info_schema(namespace, name, version, metadata)
+ if schema_errors:
+ display.warning(f"Ignoring source metadata file at {source_info_path} due to the following errors:")
+ display.warning("\n".join(schema_errors))
+ display.warning("Correct the source metadata file by reinstalling the collection.")
+ return None
+
+ return metadata
+
+
+def _validate_v1_source_info_schema(namespace, name, version, provided_arguments):
+ argument_spec_data = dict(
+ format_version=dict(choices=["1.0.0"]),
+ download_url=dict(),
+ version_url=dict(),
+ server=dict(),
+ signatures=dict(
+ type=list,
+ suboptions=dict(
+ signature=dict(),
+ pubkey_fingerprint=dict(),
+ signing_service=dict(),
+ pulp_created=dict(),
+ )
+ ),
+ name=dict(choices=[name]),
+ namespace=dict(choices=[namespace]),
+ version=dict(choices=[version]),
+ )
+
+ if not isinstance(provided_arguments, dict):
+ raise AnsibleError(
+ f'Invalid offline source info for {namespace}.{name}:{version}, expected a dict and got {type(provided_arguments)}'
+ )
+ validator = ArgumentSpecValidator(argument_spec_data)
+ validation_result = validator.validate(provided_arguments)
+
+ return validation_result.error_messages
+
+
+def _is_collection_src_dir(dir_path):
+ b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+ return os.path.isfile(os.path.join(b_dir_path, _GALAXY_YAML))
+
+
+def _is_installed_collection_dir(dir_path):
+ b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+ return os.path.isfile(os.path.join(b_dir_path, _MANIFEST_JSON))
+
+
+def _is_collection_dir(dir_path):
+ return (
+ _is_installed_collection_dir(dir_path) or
+ _is_collection_src_dir(dir_path)
+ )
+
+
+def _find_collections_in_subdirs(dir_path):
+ b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+
+ subdir_glob_pattern = os.path.join(
+ b_dir_path,
+ # b'*', # namespace is supposed to be top-level per spec
+ b'*', # collection name
+ )
+
+ for subdir in iglob(subdir_glob_pattern):
+ if os.path.isfile(os.path.join(subdir, _MANIFEST_JSON)):
+ yield subdir
+ elif os.path.isfile(os.path.join(subdir, _GALAXY_YAML)):
+ yield subdir
+
+
+def _is_collection_namespace_dir(tested_str):
+ return any(_find_collections_in_subdirs(tested_str))
+
+
+def _is_file_path(tested_str):
+ return os.path.isfile(to_bytes(tested_str, errors='surrogate_or_strict'))
+
+
+def _is_http_url(tested_str):
+ return urlparse(tested_str).scheme.lower() in {'http', 'https'}
+
+
+def _is_git_url(tested_str):
+ return tested_str.startswith(('git+', 'git@'))
+
+
+def _is_concrete_artifact_pointer(tested_str):
+ return any(
+ predicate(tested_str)
+ for predicate in (
+ # NOTE: Maintain the checks to be sorted from light to heavy:
+ _is_git_url,
+ _is_http_url,
+ _is_file_path,
+ _is_collection_dir,
+ _is_collection_namespace_dir,
+ )
+ )
+
+
+class _ComputedReqKindsMixin:
+
+ def __init__(self, *args, **kwargs):
+ if not self.may_have_offline_galaxy_info:
+ self._source_info = None
+ else:
+ info_path = self.construct_galaxy_info_path(to_bytes(self.src, errors='surrogate_or_strict'))
+
+ self._source_info = get_validated_source_info(
+ info_path,
+ self.namespace,
+ self.name,
+ self.ver
+ )
+
+ @classmethod
+ def from_dir_path_as_unknown( # type: ignore[misc]
+ cls, # type: t.Type[Collection]
+ dir_path, # type: bytes
+ art_mgr, # type: ConcreteArtifactsManager
+ ): # type: (...) -> Collection
+ """Make collection from an unspecified dir type.
+
+ This alternative constructor attempts to grab metadata from the
+ given path if it's a directory. If there's no metadata, it
+ falls back to guessing the FQCN based on the directory path and
+ sets the version to "*".
+
+ It raises a ValueError immediately if the input is not an
+ existing directory path.
+ """
+ if not os.path.isdir(dir_path):
+ raise ValueError(
+ "The collection directory '{path!s}' doesn't exist".
+ format(path=to_native(dir_path)),
+ )
+
+ try:
+ return cls.from_dir_path(dir_path, art_mgr)
+ except ValueError:
+ return cls.from_dir_path_implicit(dir_path)
+
+ @classmethod
+ def from_dir_path(cls, dir_path, art_mgr):
+ """Make collection from an directory with metadata."""
+ b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
+ if not _is_collection_dir(b_dir_path):
+ display.warning(
+ u"Collection at '{path!s}' does not have a {manifest_json!s} "
+ u'file, nor a {galaxy_yml!s} file: cannot detect version.'.
+ format(
+ galaxy_yml=to_text(_GALAXY_YAML),
+ manifest_json=to_text(_MANIFEST_JSON),
+ path=to_text(dir_path, errors='surrogate_or_strict'),
+ ),
+ )
+ raise ValueError(
+ '`dir_path` argument must be an installed or a source'
+ ' collection directory.',
+ )
+
+ tmp_inst_req = cls(None, None, dir_path, 'dir', None)
+ req_version = art_mgr.get_direct_collection_version(tmp_inst_req)
+ try:
+ req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req)
+ except TypeError as err:
+ # Looks like installed/source dir but isn't: doesn't have valid metadata.
+ display.warning(
+ u"Collection at '{path!s}' has a {manifest_json!s} "
+ u"or {galaxy_yml!s} file but it contains invalid metadata.".
+ format(
+ galaxy_yml=to_text(_GALAXY_YAML),
+ manifest_json=to_text(_MANIFEST_JSON),
+ path=to_text(dir_path, errors='surrogate_or_strict'),
+ ),
+ )
+ raise ValueError(
+ "Collection at '{path!s}' has invalid metadata".
+ format(path=to_text(dir_path, errors='surrogate_or_strict'))
+ ) from err
+
+ return cls(req_name, req_version, dir_path, 'dir', None)
+
+ @classmethod
+ def from_dir_path_implicit( # type: ignore[misc]
+ cls, # type: t.Type[Collection]
+ dir_path, # type: bytes
+ ): # type: (...) -> Collection
+ """Construct a collection instance based on an arbitrary dir.
+
+ This alternative constructor infers the FQCN based on the parent
+ and current directory names. It also sets the version to "*"
+ regardless of whether any of known metadata files are present.
+ """
+ # There is no metadata, but it isn't required for a functional collection. Determine the namespace.name from the path.
+ u_dir_path = to_text(dir_path, errors='surrogate_or_strict')
+ path_list = u_dir_path.split(os.path.sep)
+ req_name = '.'.join(path_list[-2:])
+ return cls(req_name, '*', dir_path, 'dir', None) # type: ignore[call-arg]
+
+ @classmethod
+ def from_string(cls, collection_input, artifacts_manager, supplemental_signatures):
+ req = {}
+ if _is_concrete_artifact_pointer(collection_input):
+ # Arg is a file path or URL to a collection
+ req['name'] = collection_input
+ else:
+ req['name'], _sep, req['version'] = collection_input.partition(':')
+ if not req['version']:
+ del req['version']
+ req['signatures'] = supplemental_signatures
+
+ return cls.from_requirement_dict(req, artifacts_manager)
+
+ @classmethod
+ def from_requirement_dict(cls, collection_req, art_mgr, validate_signature_options=True):
+ req_name = collection_req.get('name', None)
+ req_version = collection_req.get('version', '*')
+ req_type = collection_req.get('type')
+ # TODO: decide how to deprecate the old src API behavior
+ req_source = collection_req.get('source', None)
+ req_signature_sources = collection_req.get('signatures', None)
+ if req_signature_sources is not None:
+ if validate_signature_options and art_mgr.keyring is None:
+ raise AnsibleError(
+ f"Signatures were provided to verify {req_name} but no keyring was configured."
+ )
+
+ if not isinstance(req_signature_sources, MutableSequence):
+ req_signature_sources = [req_signature_sources]
+ req_signature_sources = frozenset(req_signature_sources)
+
+ if req_type is None:
+ if ( # FIXME: decide on the future behavior:
+ _ALLOW_CONCRETE_POINTER_IN_SOURCE
+ and req_source is not None
+ and _is_concrete_artifact_pointer(req_source)
+ ):
+ src_path = req_source
+ elif (
+ req_name is not None
+ and AnsibleCollectionRef.is_valid_collection_name(req_name)
+ ):
+ req_type = 'galaxy'
+ elif (
+ req_name is not None
+ and _is_concrete_artifact_pointer(req_name)
+ ):
+ src_path, req_name = req_name, None
+ else:
+ dir_tip_tmpl = ( # NOTE: leading LFs are for concat
+ '\n\nTip: Make sure you are pointing to the right '
+ 'subdirectory — `{src!s}` looks like a directory '
+ 'but it is neither a collection, nor a namespace '
+ 'dir.'
+ )
+
+ if req_source is not None and os.path.isdir(req_source):
+ tip = dir_tip_tmpl.format(src=req_source)
+ elif req_name is not None and os.path.isdir(req_name):
+ tip = dir_tip_tmpl.format(src=req_name)
+ elif req_name:
+ tip = '\n\nCould not find {0}.'.format(req_name)
+ else:
+ tip = ''
+
+ raise AnsibleError( # NOTE: I'd prefer a ValueError instead
+ 'Neither the collection requirement entry key '
+ "'name', nor 'source' point to a concrete "
+ "resolvable collection artifact. Also 'name' is "
+ 'not an FQCN. A valid collection name must be in '
+ 'the format <namespace>.<collection>. Please make '
+ 'sure that the namespace and the collection name '
+ 'contain characters from [a-zA-Z0-9_] only.'
+ '{extra_tip!s}'.format(extra_tip=tip),
+ )
+
+ if req_type is None:
+ if _is_git_url(src_path):
+ req_type = 'git'
+ req_source = src_path
+ elif _is_http_url(src_path):
+ req_type = 'url'
+ req_source = src_path
+ elif _is_file_path(src_path):
+ req_type = 'file'
+ req_source = src_path
+ elif _is_collection_dir(src_path):
+ if _is_installed_collection_dir(src_path) and _is_collection_src_dir(src_path):
+ # Note that ``download`` requires a dir with a ``galaxy.yml`` and fails if it
+ # doesn't exist, but if a ``MANIFEST.json`` also exists, it would be used
+ # instead of the ``galaxy.yml``.
+ raise AnsibleError(
+ u"Collection requirement at '{path!s}' has both a {manifest_json!s} "
+ u"file and a {galaxy_yml!s}.\nThe requirement must either be an installed "
+ u"collection directory or a source collection directory, not both.".
+ format(
+ path=to_text(src_path, errors='surrogate_or_strict'),
+ manifest_json=to_text(_MANIFEST_JSON),
+ galaxy_yml=to_text(_GALAXY_YAML),
+ )
+ )
+ req_type = 'dir'
+ req_source = src_path
+ elif _is_collection_namespace_dir(src_path):
+ req_name = None # No name for a virtual req or "namespace."?
+ req_type = 'subdirs'
+ req_source = src_path
+ else:
+ raise AnsibleError( # NOTE: this is never supposed to be hit
+ 'Failed to automatically detect the collection '
+ 'requirement type.',
+ )
+
+ if req_type not in {'file', 'galaxy', 'git', 'url', 'dir', 'subdirs'}:
+ raise AnsibleError(
+ "The collection requirement entry key 'type' must be "
+ 'one of file, galaxy, git, dir, subdirs, or url.'
+ )
+
+ if req_name is None and req_type == 'galaxy':
+ raise AnsibleError(
+ 'Collections requirement entry should contain '
+ "the key 'name' if it's requested from a Galaxy-like "
+ 'index server.',
+ )
+
+ if req_type != 'galaxy' and req_source is None:
+ req_source, req_name = req_name, None
+
+ if (
+ req_type == 'galaxy' and
+ isinstance(req_source, GalaxyAPI) and
+ not _is_http_url(req_source.api_server)
+ ):
+ raise AnsibleError(
+ "Collections requirement 'source' entry should contain "
+ 'a valid Galaxy API URL but it does not: {not_url!s} '
+ 'is not an HTTP URL.'.
+ format(not_url=req_source.api_server),
+ )
+
+ tmp_inst_req = cls(req_name, req_version, req_source, req_type, req_signature_sources)
+
+ if req_type not in {'galaxy', 'subdirs'} and req_name is None:
+ req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req) # TODO: fix the cache key in artifacts manager?
+
+ if req_type not in {'galaxy', 'subdirs'} and req_version == '*':
+ req_version = art_mgr.get_direct_collection_version(tmp_inst_req)
+
+ return cls(
+ req_name, req_version,
+ req_source, req_type,
+ req_signature_sources,
+ )
+
+ def __repr__(self):
+ return (
+ '<{self!s} of type {coll_type!r} from {src!s}>'.
+ format(self=self, coll_type=self.type, src=self.src or 'Galaxy')
+ )
+
+ def __str__(self):
+ return to_native(self.__unicode__())
+
+ def __unicode__(self):
+ if self.fqcn is None:
+ return (
+ u'"virtual collection Git repo"' if self.is_scm
+ else u'"virtual collection namespace"'
+ )
+
+ return (
+ u'{fqcn!s}:{ver!s}'.
+ format(fqcn=to_text(self.fqcn), ver=to_text(self.ver))
+ )
+
+ @property
+ def may_have_offline_galaxy_info(self):
+ if self.fqcn is None:
+ # Virtual collection
+ return False
+ elif not self.is_dir or self.src is None or not _is_collection_dir(self.src):
+ # Not a dir or isn't on-disk
+ return False
+ return True
+
+ def construct_galaxy_info_path(self, b_collection_path):
+ if not self.may_have_offline_galaxy_info and not self.type == 'galaxy':
+ raise TypeError('Only installed collections from a Galaxy server have offline Galaxy info')
+
+ # Store Galaxy metadata adjacent to the namespace of the collection
+ # Chop off the last two parts of the path (/ns/coll) to get the dir containing the ns
+ b_src = to_bytes(b_collection_path, errors='surrogate_or_strict')
+ b_path_parts = b_src.split(to_bytes(os.path.sep))[0:-2]
+ b_metadata_dir = to_bytes(os.path.sep).join(b_path_parts)
+
+ # ns.coll-1.0.0.info
+ b_dir_name = to_bytes(f"{self.namespace}.{self.name}-{self.ver}.info", errors="surrogate_or_strict")
+
+ # collections/ansible_collections/ns.coll-1.0.0.info/GALAXY.yml
+ return os.path.join(b_metadata_dir, b_dir_name, _SOURCE_METADATA_FILE)
+
+ def _get_separate_ns_n_name(self): # FIXME: use LRU cache
+ return self.fqcn.split('.')
+
+ @property
+ def namespace(self):
+ if self.is_virtual:
+ raise TypeError('Virtual collections do not have a namespace')
+
+ return self._get_separate_ns_n_name()[0]
+
+ @property
+ def name(self):
+ if self.is_virtual:
+ raise TypeError('Virtual collections do not have a name')
+
+ return self._get_separate_ns_n_name()[-1]
+
+ @property
+ def canonical_package_id(self):
+ if not self.is_virtual:
+ return to_native(self.fqcn)
+
+ return (
+ '<virtual namespace from {src!s} of type {src_type!s}>'.
+ format(src=to_native(self.src), src_type=to_native(self.type))
+ )
+
+ @property
+ def is_virtual(self):
+ return self.is_scm or self.is_subdirs
+
+ @property
+ def is_file(self):
+ return self.type == 'file'
+
+ @property
+ def is_dir(self):
+ return self.type == 'dir'
+
+ @property
+ def namespace_collection_paths(self):
+ return [
+ to_native(path)
+ for path in _find_collections_in_subdirs(self.src)
+ ]
+
+ @property
+ def is_subdirs(self):
+ return self.type == 'subdirs'
+
+ @property
+ def is_url(self):
+ return self.type == 'url'
+
+ @property
+ def is_scm(self):
+ return self.type == 'git'
+
+ @property
+ def is_concrete_artifact(self):
+ return self.type in {'git', 'url', 'file', 'dir', 'subdirs'}
+
+ @property
+ def is_online_index_pointer(self):
+ return not self.is_concrete_artifact
+
+ @property
+ def source_info(self):
+ return self._source_info
+
+
+RequirementNamedTuple = namedtuple('Requirement', ('fqcn', 'ver', 'src', 'type', 'signature_sources')) # type: ignore[name-match]
+
+
+CandidateNamedTuple = namedtuple('Candidate', ('fqcn', 'ver', 'src', 'type', 'signatures')) # type: ignore[name-match]
+
+
+class Requirement(
+ _ComputedReqKindsMixin,
+ RequirementNamedTuple,
+):
+ """An abstract requirement request."""
+
+ def __new__(cls, *args, **kwargs):
+ self = RequirementNamedTuple.__new__(cls, *args, **kwargs)
+ return self
+
+ def __init__(self, *args, **kwargs):
+ super(Requirement, self).__init__()
+
+
+class Candidate(
+ _ComputedReqKindsMixin,
+ CandidateNamedTuple,
+):
+ """A concrete collection candidate with its version resolved."""
+
+ def __new__(cls, *args, **kwargs):
+ self = CandidateNamedTuple.__new__(cls, *args, **kwargs)
+ return self
+
+ def __init__(self, *args, **kwargs):
+ super(Candidate, self).__init__()
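+
+
+# Illustrative round-trip (assumed FQCN and version, not from this change):
+# Requirement.from_string('ns.coll:>=1.0.0', art_mgr, None) evaluates to
+# Requirement(fqcn='ns.coll', ver='>=1.0.0', src=None, type='galaxy', signature_sources=None)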
diff --git a/lib/ansible/galaxy/dependency_resolution/errors.py b/lib/ansible/galaxy/dependency_resolution/errors.py
new file mode 100644
index 0000000..ae3b439
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/errors.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Dependency resolution exceptions."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from resolvelib.resolvers import (
+ ResolutionImpossible as CollectionDependencyResolutionImpossible,
+ InconsistentCandidate as CollectionDependencyInconsistentCandidate,
+ )
+except ImportError:
+ class CollectionDependencyResolutionImpossible(Exception): # type: ignore[no-redef]
+ pass
+
+ class CollectionDependencyInconsistentCandidate(Exception): # type: ignore[no-redef]
+ pass
diff --git a/lib/ansible/galaxy/dependency_resolution/providers.py b/lib/ansible/galaxy/dependency_resolution/providers.py
new file mode 100644
index 0000000..817a1eb
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/providers.py
@@ -0,0 +1,548 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Requirement provider interfaces."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import functools
+import typing as t
+
+if t.TYPE_CHECKING:
+ from ansible.galaxy.collection.concrete_artifact_manager import (
+ ConcreteArtifactsManager,
+ )
+ from ansible.galaxy.collection.galaxy_api_proxy import MultiGalaxyAPIProxy
+ from ansible.galaxy.api import GalaxyAPI
+
+from ansible.galaxy.collection.gpg import get_signature_from_source
+from ansible.galaxy.dependency_resolution.dataclasses import (
+ Candidate,
+ Requirement,
+)
+from ansible.galaxy.dependency_resolution.versioning import (
+ is_pre_release,
+ meets_requirements,
+)
+from ansible.module_utils.six import string_types
+from ansible.utils.version import SemanticVersion, LooseVersion
+
+from collections.abc import Set
+
+try:
+ from resolvelib import AbstractProvider
+ from resolvelib import __version__ as resolvelib_version
+except ImportError:
+ class AbstractProvider: # type: ignore[no-redef]
+ pass
+
+ resolvelib_version = '0.0.0'
+
+
+# TODO: add python requirements to ansible-test's ansible-core distribution info and remove the hardcoded lowerbound/upperbound fallback
+RESOLVELIB_LOWERBOUND = SemanticVersion("0.5.3")
+RESOLVELIB_UPPERBOUND = SemanticVersion("0.9.0")
+RESOLVELIB_VERSION = SemanticVersion.from_loose_version(LooseVersion(resolvelib_version))
+
+
+class PinnedCandidateRequests(Set):
+ """Custom set class to store Candidate objects. Excludes the 'signatures' attribute when determining if a Candidate instance is in the set."""
+ CANDIDATE_ATTRS = ('fqcn', 'ver', 'src', 'type')
+
+ def __init__(self, candidates):
+ self._candidates = set(candidates)
+
+ def __iter__(self):
+ return iter(self._candidates)
+
+ def __contains__(self, value):
+ if not isinstance(value, Candidate):
+ raise ValueError(f"Expected a Candidate object but got {value!r}")
+ for candidate in self._candidates:
+ # Compare Candidate attributes excluding "signatures" since it is
+ # unrelated to whether or not a matching Candidate is user-requested.
+ # Candidate objects in the set are not expected to have signatures.
+ for attr in PinnedCandidateRequests.CANDIDATE_ATTRS:
+ if getattr(value, attr) != getattr(candidate, attr):
+ break
+ else:
+ return True
+ return False
+
+ def __len__(self):
+ return len(self._candidates)
+
+
+class CollectionDependencyProviderBase(AbstractProvider):
+ """Delegate providing a requirement interface for the resolver."""
+
+ def __init__(
+ self, # type: CollectionDependencyProviderBase
+ apis, # type: MultiGalaxyAPIProxy
+ concrete_artifacts_manager=None, # type: ConcreteArtifactsManager
+ user_requirements=None, # type: t.Iterable[Requirement]
+ preferred_candidates=None, # type: t.Iterable[Candidate]
+ with_deps=True, # type: bool
+ with_pre_releases=False, # type: bool
+ upgrade=False, # type: bool
+ include_signatures=True, # type: bool
+ ): # type: (...) -> None
+ r"""Initialize helper attributes.
+
+ :param apis: An instance of the multiple Galaxy APIs wrapper.
+
+ :param concrete_artifacts_manager: An instance of the caching \
+ concrete artifacts manager.
+
+ :param with_deps: A flag specifying whether the resolver \
+ should attempt to pull-in the deps of the \
+ requested requirements. On by default.
+
+ :param with_pre_releases: A flag specifying whether the \
+ resolver should include pre-releases. \
+ Off by default.
+
+ :param upgrade: A flag specifying whether the resolver should \
+ skip matching versions that are not upgrades. \
+ Off by default.
+
+ :param include_signatures: A flag to determine whether to retrieve \
+ signatures from the Galaxy APIs and \
+ include signatures in matching Candidates. \
+ On by default.
+ """
+ self._api_proxy = apis
+ self._make_req_from_dict = functools.partial(
+ Requirement.from_requirement_dict,
+ art_mgr=concrete_artifacts_manager,
+ )
+ self._pinned_candidate_requests = PinnedCandidateRequests(
+ # NOTE: User-provided signatures are supplemental, so signatures
+ # NOTE: are not used to determine if a candidate is user-requested
+ Candidate(req.fqcn, req.ver, req.src, req.type, None)
+ for req in (user_requirements or ())
+ if req.is_concrete_artifact or (
+ req.ver != '*' and
+ not req.ver.startswith(('<', '>', '!='))
+ )
+ )
+ self._preferred_candidates = set(preferred_candidates or ())
+ self._with_deps = with_deps
+ self._with_pre_releases = with_pre_releases
+ self._upgrade = upgrade
+ self._include_signatures = include_signatures
+
+ def _is_user_requested(self, candidate): # type: (Candidate) -> bool
+ """Check if the candidate is requested by the user."""
+ if candidate in self._pinned_candidate_requests:
+ return True
+
+ if candidate.is_online_index_pointer and candidate.src is not None:
+ # NOTE: Candidate is a namedtuple, it has a source server set
+ # NOTE: to a specific GalaxyAPI instance or `None`. When the
+ # NOTE: user runs
+ # NOTE:
+ # NOTE: $ ansible-galaxy collection install ns.coll
+ # NOTE:
+ # NOTE: then it's saved in `self._pinned_candidate_requests`
+ # NOTE: as `('ns.coll', '*', None, 'galaxy')` but then
+ # NOTE: `self.find_matches()` calls `self.is_satisfied_by()`
+ # NOTE: with Candidate instances bound to each specific
+ # NOTE: server available, those look like
+ # NOTE: `('ns.coll', '*', GalaxyAPI(...), 'galaxy')` and
+ # NOTE: wouldn't match the user requests saved in
+ # NOTE: `self._pinned_candidate_requests`. This is why we
+ # NOTE: normalize the collection to have `src=None` and try
+ # NOTE: again.
+ # NOTE:
+ # NOTE: When the user request comes from `requirements.yml`
+ # NOTE: with the `source:` set, it'll match the first check
+ # NOTE: but it still can have entries with `src=None` so this
+ # NOTE: normalized check is still necessary.
+ # NOTE:
+ # NOTE: User-provided signatures are supplemental, so signatures
+ # NOTE: are not used to determine if a candidate is user-requested
+ return Candidate(
+ candidate.fqcn, candidate.ver, None, candidate.type, None
+ ) in self._pinned_candidate_requests
+
+ return False
+
+ def identify(self, requirement_or_candidate):
+ # type: (t.Union[Candidate, Requirement]) -> str
+ """Given requirement or candidate, return an identifier for it.
+
+ This is used to identify a requirement or candidate, e.g.
+ whether two requirements should have their specifier parts
+ (version ranges or pins) merged, whether two candidates would
+ conflict with each other (because they have the same name but
+ different versions).
+ """
+ return requirement_or_candidate.canonical_package_id
+
+ def get_preference(self, *args, **kwargs):
+ # type: (t.Any, t.Any) -> t.Union[float, int]
+ """Return sort key function return value for given requirement.
+
+ This result should be based on preference that is defined as
+ "I think this requirement should be resolved first".
+ The lower the return value is, the more preferred this
+ group of arguments is.
+
+ resolvelib >=0.5.3, <0.7.0
+
+ :param resolution: Currently pinned candidate, or ``None``.
+
+ :param candidates: A list of possible candidates.
+
+ :param information: A list of requirement information.
+
+ Each ``information`` instance is a named tuple with two entries:
+
+ * ``requirement`` specifies a requirement contributing to
+ the current candidate list
+
+ * ``parent`` specifies the candidate that provides
+ (depends on) the requirement, or `None`
+ to indicate a root requirement.
+
+ resolvelib >=0.7.0, <0.8.0
+
+ :param identifier: The value returned by ``identify()``.
+
+ :param resolutions: Mapping of identifier, candidate pairs.
+
+ :param candidates: Possible candidates for the identifier.
+ Mapping of identifier, list of candidate pairs.
+
+ :param information: Requirement information of each package.
+ Mapping of identifier, list of named tuple pairs.
+ The named tuples have the entries ``requirement`` and ``parent``.
+
+ resolvelib >=0.8.0, <=0.8.1
+
+ :param identifier: The value returned by ``identify()``.
+
+ :param resolutions: Mapping of identifier, candidate pairs.
+
+ :param candidates: Possible candidates for the identifier.
+ Mapping of identifier, list of candidate pairs.
+
+ :param information: Requirement information of each package.
+ Mapping of identifier, list of named tuple pairs.
+ The named tuples have the entries ``requirement`` and ``parent``.
+
+ :param backtrack_causes: Sequence of requirement information that were
+ the requirements that caused the resolver to most recently backtrack.
+
+ The preference could depend on various factors, including
+ (not necessarily in this order):
+
+ * Is this package pinned in the current resolution result?
+
+ * How relaxed is the requirement? Stricter ones should
+ probably be worked on first? (I don't know, actually.)
+
+ * How many possibilities are there to satisfy this
+ requirement? Those with few left should likely be worked on
+ first, I guess?
+
+ * Are there any known conflicts for this requirement?
+ We should probably work on those with the most
+ known conflicts.
+
+ A sortable value should be returned (this will be used as the
+ `key` parameter of the built-in sorting function). The smaller
+ the value is, the more preferred this requirement is (i.e. the
+ sorting function is called with ``reverse=False``).
+ """
+ raise NotImplementedError
+
+ def _get_preference(self, candidates):
+ # type: (list[Candidate]) -> t.Union[float, int]
+ if any(
+ candidate in self._preferred_candidates
+ for candidate in candidates
+ ):
+ # NOTE: Prefer pre-installed candidates over newer versions
+ # NOTE: available from Galaxy or other sources.
+ return float('-inf')
+ return len(candidates)
+
+ def find_matches(self, *args, **kwargs):
+ # type: (t.Any, t.Any) -> list[Candidate]
+ r"""Find all possible candidates satisfying given requirements.
+
+ This tries to get candidates based on the requirements' types.
+
+ For concrete requirements (SCM, dir, namespace dir, local or
+ remote archives), the one-and-only match is returned.
+
+ For a "named" requirement, Galaxy-compatible APIs are consulted
+ to find concrete candidates for this requirement. If there's a
+ pre-installed candidate, it's prepended in front of the others.
+
+ resolvelib >=0.5.3, <0.6.0
+
+ :param requirements: A collection of requirements which all of \
+ the returned candidates must match. \
+ All requirements are guaranteed to have \
+ the same identifier. \
+ The collection is never empty.
+
+ resolvelib >=0.6.0
+
+ :param identifier: The value returned by ``identify()``.
+
+ :param requirements: The requirements all returned candidates must satisfy.
+ Mapping of identifier, iterator of requirement pairs.
+
+ :param incompatibilities: Incompatible versions that must be excluded
+ from the returned list.
+
+ :returns: An iterable that orders candidates by preference, \
+ e.g. the most preferred candidate comes first.
+ """
+ raise NotImplementedError
+
+ def _find_matches(self, requirements):
+ # type: (list[Requirement]) -> list[Candidate]
+ # FIXME: The first requirement may be a Git repo followed by
+ # FIXME: its cloned tmp dir. Using only the first one creates
+ # FIXME: loops that prevent any further dependency exploration.
+ # FIXME: We need to figure out how to prevent this.
+ first_req = requirements[0]
+ fqcn = first_req.fqcn
+ # The fqcn is guaranteed to be the same
+ version_req = "A SemVer-compliant version or '*' is required. See https://semver.org to learn how to compose it correctly. "
+ version_req += "This is an issue with the collection."
+
+ # If we're upgrading collections, we can't calculate preinstalled_candidates until the latest matches are found.
+ # Otherwise, we can potentially avoid a Galaxy API call by doing this first.
+ preinstalled_candidates = set()
+ if not self._upgrade and first_req.type == 'galaxy':
+ preinstalled_candidates = {
+ candidate for candidate in self._preferred_candidates
+ if candidate.fqcn == fqcn and
+ all(self.is_satisfied_by(requirement, candidate) for requirement in requirements)
+ }
+ try:
+ coll_versions = [] if preinstalled_candidates else self._api_proxy.get_collection_versions(first_req) # type: t.Iterable[t.Tuple[str, GalaxyAPI]]
+ except TypeError as exc:
+ if first_req.is_concrete_artifact:
+ # Non-hashable versions will cause a TypeError
+ raise ValueError(
+ f"Invalid version found for the collection '{first_req}'. {version_req}"
+ ) from exc
+ # Unexpected error from a Galaxy server
+ raise
+
+ if first_req.is_concrete_artifact:
+ # FIXME: do we assume that all the following artifacts are also concrete?
+ # FIXME: does using fqcn==None cause us problems here?
+
+ # Ensure the version found in the concrete artifact is SemVer-compliant
+ for version, req_src in coll_versions:
+ version_err = f"Invalid version found for the collection '{first_req}': {version} ({type(version)}). {version_req}"
+ # NOTE: The known cases causing the version to be a non-string object come from
+ # NOTE: the differences in how the YAML parser normalizes ambiguous values and
+ # NOTE: how the end-users sometimes expect them to be parsed. Unless the users
+ # NOTE: explicitly use the double quotes of one of the multiline string syntaxes
+ # NOTE: in the collection metadata file, PyYAML will parse a value containing
+ # NOTE: two dot-separated integers as `float`, a single integer as `int`, and 3+
+ # NOTE: integers as a `str`. In some cases, they may also use an empty value
+ # NOTE: which is normalized as `null` and turned into `None` in the Python-land.
+ # NOTE: Another known mistake is setting a minor part of the SemVer notation
+ # NOTE: skipping the "patch" bit like "1.0" which is assumed non-compliant even
+ # NOTE: after the conversion to string.
+ if not isinstance(version, string_types):
+ raise ValueError(version_err)
+ elif version != '*':
+ try:
+ SemanticVersion(version)
+ except ValueError as ex:
+ raise ValueError(version_err) from ex
+
+ return [
+ Candidate(fqcn, version, _none_src_server, first_req.type, None)
+ for version, _none_src_server in coll_versions
+ ]
+
+ latest_matches = []
+ signatures = []
+ extra_signature_sources = [] # type: list[str]
+ for version, src_server in coll_versions:
+ tmp_candidate = Candidate(fqcn, version, src_server, 'galaxy', None)
+
+ unsatisfied = False
+ for requirement in requirements:
+ unsatisfied |= not self.is_satisfied_by(requirement, tmp_candidate)
+ # FIXME
+ # unsatisfied |= not self.is_satisfied_by(requirement, tmp_candidate) or not (
+ # requirement.src is None or # if this is true for some candidates but not all it will break key param - Nonetype can't be compared to str
+ # requirement.src == candidate.src
+ # )
+ if unsatisfied:
+ break
+ if not self._include_signatures:
+ continue
+
+ extra_signature_sources.extend(requirement.signature_sources or [])
+
+ if not unsatisfied:
+ if self._include_signatures:
+ signatures = src_server.get_collection_signatures(first_req.namespace, first_req.name, version)
+ for extra_source in extra_signature_sources:
+ signatures.append(get_signature_from_source(extra_source))
+ latest_matches.append(
+ Candidate(fqcn, version, src_server, 'galaxy', frozenset(signatures))
+ )
+
+ latest_matches.sort(
+ key=lambda candidate: (
+ SemanticVersion(candidate.ver), candidate.src,
+ ),
+ reverse=True, # prefer newer versions over older ones
+ )
+
+ if not preinstalled_candidates:
+ preinstalled_candidates = {
+ candidate for candidate in self._preferred_candidates
+ if candidate.fqcn == fqcn and
+ (
+ # check if an upgrade is necessary
+ all(self.is_satisfied_by(requirement, candidate) for requirement in requirements) and
+ (
+ not self._upgrade or
+ # check if an upgrade is preferred
+ all(SemanticVersion(latest.ver) <= SemanticVersion(candidate.ver) for latest in latest_matches)
+ )
+ )
+ }
+
+ return list(preinstalled_candidates) + latest_matches
+
+ def is_satisfied_by(self, requirement, candidate):
+ # type: (Requirement, Candidate) -> bool
+ r"""Whether the given requirement is satisfiable by a candidate.
+
+ :param requirement: A requirement that produced the `candidate`.
+
+ :param candidate: A pinned candidate supposedly matching the \
+ `requirement` specifier. It is guaranteed to \
+ have been generated from the `requirement`.
+
+ :returns: Indication whether the `candidate` is a viable \
+ solution to the `requirement`.
+ """
+ # NOTE: Only allow pre-release candidates if we want pre-releases
+ # NOTE: or the req ver was an exact match with the pre-release
+ # NOTE: version. Another case where we'd want to allow
+ # NOTE: pre-releases is when there are several user requirements
+ # NOTE: and one of them is a pre-release that also matches a
+ # NOTE: transitive dependency of another requirement.
+ allow_pre_release = self._with_pre_releases or not (
+ requirement.ver == '*' or
+ requirement.ver.startswith('<') or
+ requirement.ver.startswith('>') or
+ requirement.ver.startswith('!=')
+ ) or self._is_user_requested(candidate)
+ if is_pre_release(candidate.ver) and not allow_pre_release:
+ return False
+
+ # NOTE: This is a set of Pipenv-inspired optimizations. Ref:
+ # https://github.com/sarugaku/passa/blob/2ac00f1/src/passa/models/providers.py#L58-L74
+ if (
+ requirement.is_virtual or
+ candidate.is_virtual or
+ requirement.ver == '*'
+ ):
+ return True
+
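+ # e.g. a pin of '>=1.0.0,<2.0.0' is satisfied by a candidate at '1.5.0'
+ # but not at '2.1.0'; '*' short-circuits to True above.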
+ return meets_requirements(
+ version=candidate.ver,
+ requirements=requirement.ver,
+ )
+
+ def get_dependencies(self, candidate):
+ # type: (Candidate) -> list[Candidate]
+ r"""Get direct dependencies of a candidate.
+
+ :returns: A collection of requirements that `candidate` \
+ specifies as its dependencies.
+ """
+ # FIXME: If there's several galaxy servers set, there may be a
+ # FIXME: situation when the metadata of the same collection
+ # FIXME: differs. So how do we resolve this case? Priority?
+ # FIXME: Taking into account a pinned hash? Exploding on
+ # FIXME: any differences?
+ # NOTE: The underlying implementation currently uses the first one found
+ req_map = self._api_proxy.get_collection_dependencies(candidate)
+
+ # NOTE: This guard expression MUST perform an early exit only
+ # NOTE: after the `get_collection_dependencies()` call because
+ # NOTE: internally it populates the artifact URL of the candidate,
+ # NOTE: its SHA hash and the Galaxy API token. These are still
+ # NOTE: necessary with `--no-deps` because even with the disabled
+ # NOTE: dependency resolution the outer layer will still need to
+ # NOTE: know how to download and validate the artifact.
+ #
+ # NOTE: Virtual candidates should always return dependencies
+ # NOTE: because they are ephemeral and non-installable.
+ if not self._with_deps and not candidate.is_virtual:
+ return []
+
+ return [
+ self._make_req_from_dict({'name': dep_name, 'version': dep_req})
+ for dep_name, dep_req in req_map.items()
+ ]
+
+
+# Classes to handle resolvelib API changes between minor versions for 0.X
+class CollectionDependencyProvider050(CollectionDependencyProviderBase):
+ def find_matches(self, requirements): # type: ignore[override]
+ # type: (list[Requirement]) -> list[Candidate]
+ return self._find_matches(requirements)
+
+ def get_preference(self, resolution, candidates, information): # type: ignore[override]
+ # type: (t.Optional[Candidate], list[Candidate], list[t.NamedTuple]) -> t.Union[float, int]
+ return self._get_preference(candidates)
+
+
+class CollectionDependencyProvider060(CollectionDependencyProviderBase):
+ def find_matches(self, identifier, requirements, incompatibilities): # type: ignore[override]
+ # type: (str, t.Mapping[str, t.Iterator[Requirement]], t.Mapping[str, t.Iterator[Requirement]]) -> list[Candidate]
+ return [
+ match for match in self._find_matches(list(requirements[identifier]))
+ if not any(match.ver == incompat.ver for incompat in incompatibilities[identifier])
+ ]
+
+ def get_preference(self, resolution, candidates, information): # type: ignore[override]
+ # type: (t.Optional[Candidate], list[Candidate], list[t.NamedTuple]) -> t.Union[float, int]
+ return self._get_preference(candidates)
+
+
+class CollectionDependencyProvider070(CollectionDependencyProvider060):
+ def get_preference(self, identifier, resolutions, candidates, information): # type: ignore[override]
+ # type: (str, t.Mapping[str, Candidate], t.Mapping[str, t.Iterator[Candidate]], t.Iterator[t.NamedTuple]) -> t.Union[float, int]
+ return self._get_preference(list(candidates[identifier]))
+
+
+class CollectionDependencyProvider080(CollectionDependencyProvider060):
+ def get_preference(self, identifier, resolutions, candidates, information, backtrack_causes): # type: ignore[override]
+ # type: (str, t.Mapping[str, Candidate], t.Mapping[str, t.Iterator[Candidate]], t.Iterator[t.NamedTuple], t.Sequence) -> t.Union[float, int]
+ return self._get_preference(list(candidates[identifier]))
+
+
+def _get_provider(): # type: () -> t.Type[CollectionDependencyProviderBase]
+ if RESOLVELIB_VERSION >= SemanticVersion("0.8.0"):
+ return CollectionDependencyProvider080
+ if RESOLVELIB_VERSION >= SemanticVersion("0.7.0"):
+ return CollectionDependencyProvider070
+ if RESOLVELIB_VERSION >= SemanticVersion("0.6.0"):
+ return CollectionDependencyProvider060
+ return CollectionDependencyProvider050
+
+
+CollectionDependencyProvider = _get_provider()
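+
+# e.g. with resolvelib 0.8.x installed this resolves to CollectionDependencyProvider080;
+# when resolvelib is absent entirely, the 0.5.x-compatible fallback is used.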
diff --git a/lib/ansible/galaxy/dependency_resolution/reporters.py b/lib/ansible/galaxy/dependency_resolution/reporters.py
new file mode 100644
index 0000000..69908b2
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/reporters.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Requiement reporter implementations."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from resolvelib import BaseReporter
+except ImportError:
+ class BaseReporter: # type: ignore[no-redef]
+ pass
+
+
+class CollectionDependencyReporter(BaseReporter):
+ """A dependency reporter for Ansible Collections.
+
+ This is a proxy class allowing us to abstract away importing resolvelib
+ outside of the `ansible.galaxy.dependency_resolution` Python package.
+ """
diff --git a/lib/ansible/galaxy/dependency_resolution/resolvers.py b/lib/ansible/galaxy/dependency_resolution/resolvers.py
new file mode 100644
index 0000000..87ca38d
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/resolvers.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020-2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Requirement resolver implementations."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from resolvelib import Resolver
+except ImportError:
+ class Resolver: # type: ignore[no-redef]
+ pass
+
+
+class CollectionDependencyResolver(Resolver):
+ """A dependency resolver for Ansible Collections.
+
+ This is a proxy class allowing us to abstract away importing resolvelib
+ outside of the `ansible.galaxy.dependency_resolution` Python package.
+ """
diff --git a/lib/ansible/galaxy/dependency_resolution/versioning.py b/lib/ansible/galaxy/dependency_resolution/versioning.py
new file mode 100644
index 0000000..93adce4
--- /dev/null
+++ b/lib/ansible/galaxy/dependency_resolution/versioning.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019-2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Version comparison helpers."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import operator
+
+from ansible.module_utils.compat.version import LooseVersion
+from ansible.utils.version import SemanticVersion
+
+
+def is_pre_release(version):
+ # type: (str) -> bool
+ """Figure out if a given version is a pre-release."""
+ try:
+ return SemanticVersion(version).is_prerelease
+ except ValueError:
+ return False
+
+
+def meets_requirements(version, requirements):
+ # type: (str, str) -> bool
+ """Verify if a given version satisfies all the requirements.
+
+ Supported version identifiers are:
+ * '=='
+ * '!='
+ * '>'
+ * '>='
+ * '<'
+ * '<='
+ * '*'
+
+ Each requirement is delimited by ','.
+ """
+ op_map = {
+ '!=': operator.ne,
+ '==': operator.eq,
+ '=': operator.eq,
+ '>=': operator.ge,
+ '>': operator.gt,
+ '<=': operator.le,
+ '<': operator.lt,
+ }
+
+ for req in requirements.split(','):
+ op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
+ op = op_map.get(req[:op_pos])
+
+ requirement = req[op_pos:]
+ if not op:
+ requirement = req
+ op = operator.eq
+
+ if requirement == '*' or version == '*':
+ continue
+
+ if not op(
+ SemanticVersion(version),
+ SemanticVersion.from_loose_version(LooseVersion(requirement)),
+ ):
+ break
+ else:
+ return True
+
+ # The loop was broken early; the version does not meet all the requirements
+ return False
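+
+
+# Illustrative (assumed versions): meets_requirements('1.5.0', '>=1.0.0,<2.0.0')
+# is True, while meets_requirements('2.1.0', '>=1.0.0,<2.0.0') is False because
+# the '<2.0.0' clause fails.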
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
new file mode 100644
index 0000000..99bb525
--- /dev/null
+++ b/lib/ansible/galaxy/role.py
@@ -0,0 +1,439 @@
+########################################################################
+#
+# (C) 2015, Brian Coca <bcoca@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import datetime
+import os
+import tarfile
+import tempfile
+
+from collections.abc import MutableSequence
+from shutil import rmtree
+
+from ansible import context
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.galaxy.api import GalaxyAPI
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.yaml import yaml_dump, yaml_load
+from ansible.module_utils.compat.version import LooseVersion
+from ansible.module_utils.urls import open_url
+from ansible.playbook.role.requirement import RoleRequirement
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class GalaxyRole(object):
+
+ SUPPORTED_SCMS = set(['git', 'hg'])
+ META_MAIN = (os.path.join('meta', 'main.yml'), os.path.join('meta', 'main.yaml'))
+ META_INSTALL = os.path.join('meta', '.galaxy_install_info')
+ META_REQUIREMENTS = (os.path.join('meta', 'requirements.yml'), os.path.join('meta', 'requirements.yaml'))
+ ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
+
+ def __init__(self, galaxy, api, name, src=None, version=None, scm=None, path=None):
+
+ self._metadata = None
+ self._metadata_dependencies = None
+ self._requirements = None
+ self._install_info = None
+ self._validate_certs = not context.CLIARGS['ignore_certs']
+
+ display.debug('Validate TLS certificates: %s' % self._validate_certs)
+
+ self.galaxy = galaxy
+ self._api = api
+
+ self.name = name
+ self.version = version
+ self.src = src or name
+ self.download_url = None
+ self.scm = scm
+ self.paths = [os.path.join(x, self.name) for x in galaxy.roles_paths]
+
+ if path is not None:
+ if not path.endswith(os.path.join(os.path.sep, self.name)):
+ path = os.path.join(path, self.name)
+ else:
+ # Look for a meta/main.ya?ml inside the potential role dir in case
+ # the role name is the same as parent directory of the role.
+ #
+ # Example:
+ # ./roles/testing/testing/meta/main.yml
+ for meta_main in self.META_MAIN:
+ if os.path.exists(os.path.join(path, name, meta_main)):
+ path = os.path.join(path, self.name)
+ break
+ self.path = path
+ else:
+ # use the first path by default
+ self.path = self.paths[0]
+
+ def __repr__(self):
+ """
+ Returns "rolename (version)" if version is not null
+ Returns "rolename" otherwise
+ """
+ if self.version:
+ return "%s (%s)" % (self.name, self.version)
+ else:
+ return self.name
+
+ def __eq__(self, other):
+ return self.name == other.name
+
+ @property
+ def api(self):
+ if not isinstance(self._api, GalaxyAPI):
+ return self._api.api
+ return self._api
+
+ @property
+ def metadata(self):
+ """
+ Returns role metadata
+ """
+ if self._metadata is None:
+ for path in self.paths:
+ for meta_main in self.META_MAIN:
+ meta_path = os.path.join(path, meta_main)
+ if os.path.isfile(meta_path):
+ try:
+ with open(meta_path, 'r') as f:
+ self._metadata = yaml_load(f)
+ except Exception:
+ display.vvvvv("Unable to load metadata for %s" % self.name)
+ return False
+ break
+
+ return self._metadata
+
+ @property
+ def metadata_dependencies(self):
+ """
+ Returns a list of dependencies from role metadata
+ """
+ if self._metadata_dependencies is None:
+ self._metadata_dependencies = []
+
+ if self.metadata is not None:
+ self._metadata_dependencies = self.metadata.get('dependencies') or []
+
+ if not isinstance(self._metadata_dependencies, MutableSequence):
+ raise AnsibleParserError(
+ f"Expected role dependencies to be a list. Role {self} has meta/main.yml with dependencies {self._metadata_dependencies}"
+ )
+
+ return self._metadata_dependencies
+
+ @property
+ def install_info(self):
+ """
+ Returns role install info
+ """
+ if self._install_info is None:
+
+ info_path = os.path.join(self.path, self.META_INSTALL)
+ if os.path.isfile(info_path):
+ try:
+ # A context manager closes the handle and avoids referencing
+ # an unbound name if open() itself raises.
+ with open(info_path, 'r') as f:
+ self._install_info = yaml_load(f)
+ except Exception:
+ display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
+ return False
+ return self._install_info
+
+ @property
+ def _exists(self):
+ for path in self.paths:
+ if os.path.isdir(path):
+ return True
+
+ return False
+
+ def _write_galaxy_install_info(self):
+ """
+ Writes a YAML-formatted file to the role's meta/ directory
+ (named .galaxy_install_info) which contains some information
+ we can use later for commands like 'list' and 'info'.
+ """
+
+ info = dict(
+ version=self.version,
+ install_date=datetime.datetime.utcnow().strftime("%c"),
+ )
+ if not os.path.exists(os.path.join(self.path, 'meta')):
+ os.makedirs(os.path.join(self.path, 'meta'))
+ info_path = os.path.join(self.path, self.META_INSTALL)
+ with open(info_path, 'w+') as f:
+ try:
+ self._install_info = yaml_dump(info, f)
+ except Exception:
+ return False
+
+ return True
+
+ def remove(self):
+ """
+ Removes the specified role from the roles path.
+ There is a sanity check to make sure there's a meta/main.yml file at this
+ path so the user doesn't blow away random directories.
+ """
+ if self.metadata:
+ try:
+ rmtree(self.path)
+ return True
+ except Exception:
+ pass
+
+ return False
+
+ def fetch(self, role_data):
+ """
+ Downloads the archived role to a temp location based on role data
+ """
+ if role_data:
+
+ # first grab the file and save it to a temp location
+ if self.download_url is not None:
+ archive_url = self.download_url
+ elif "github_user" in role_data and "github_repo" in role_data:
+ archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
+ else:
+ archive_url = self.src
+
+ display.display("- downloading role from %s" % archive_url)
+
+ try:
+ url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent())
+ temp_file = tempfile.NamedTemporaryFile(delete=False)
+ data = url_file.read()
+ while data:
+ temp_file.write(data)
+ data = url_file.read()
+ temp_file.close()
+ return temp_file.name
+ except Exception as e:
+ display.error(u"failed to download the file: %s" % to_text(e))
+
+ return False
+
+ def install(self):
+
+ if self.scm:
+ # create tar file from scm url
+ tmp_file = RoleRequirement.scm_archive_role(keep_scm_meta=context.CLIARGS['keep_scm_meta'], **self.spec)
+ elif self.src:
+ if os.path.isfile(self.src):
+ tmp_file = self.src
+ elif '://' in self.src:
+ role_data = self.src
+ tmp_file = self.fetch(role_data)
+ else:
+ role_data = self.api.lookup_role_by_name(self.src)
+ if not role_data:
+ raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.api.api_server))
+
+ if role_data.get('role_type') == 'APP':
+ # Container Role
+ display.warning("%s is a Container App role, and should only be installed using Ansible "
+ "Container" % self.name)
+
+ role_versions = self.api.fetch_role_related('versions', role_data['id'])
+ if not self.version:
+ # convert the version names to LooseVersion objects
+ # and sort them to get the latest version. If there
+ # are no versions in the list, we'll grab the head
+ # of the master branch
+ if len(role_versions) > 0:
+ loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
+ try:
+ loose_versions.sort()
+ except TypeError:
+ raise AnsibleError(
+ 'Unable to compare role versions (%s) to determine the most recent version due to incompatible version formats. '
+ 'Please contact the role author to resolve versioning conflicts, or specify an explicit role version to '
+ 'install.' % ', '.join([v.vstring for v in loose_versions])
+ )
+ self.version = to_text(loose_versions[-1])
+ elif role_data.get('github_branch', None):
+ self.version = role_data['github_branch']
+ else:
+ self.version = 'master'
+ elif self.version != 'master':
+ if role_versions and to_text(self.version) not in [a.get('name', None) for a in role_versions]:
+ raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
+ self.name,
+ role_versions))
+
+ # check if there's a source link/url for our role_version
+ for role_version in role_versions:
+ if role_version['name'] == self.version and 'source' in role_version:
+ self.src = role_version['source']
+ if role_version['name'] == self.version and 'download_url' in role_version:
+ self.download_url = role_version['download_url']
+
+ tmp_file = self.fetch(role_data)
+
+ else:
+ raise AnsibleError("No valid role data found")
+
+ if tmp_file:
+
+ display.debug("installing from %s" % tmp_file)
+
+ if not tarfile.is_tarfile(tmp_file):
+ raise AnsibleError("the downloaded file does not appear to be a valid tar archive.")
+ else:
+ role_tar_file = tarfile.open(tmp_file, "r")
+ # verify the role's meta file
+ meta_file = None
+ members = role_tar_file.getmembers()
+ # next find the metadata file
+ for member in members:
+ for meta_main in self.META_MAIN:
+ if meta_main in member.name:
+ # Look for parent of meta/main.yml
+ # Due to possibility of sub roles each containing meta/main.yml
+ # look for shortest length parent
+ meta_parent_dir = os.path.dirname(os.path.dirname(member.name))
+ if not meta_file:
+ archive_parent_dir = meta_parent_dir
+ meta_file = member
+ else:
+ if len(meta_parent_dir) < len(archive_parent_dir):
+ archive_parent_dir = meta_parent_dir
+ meta_file = member
+ if not meta_file:
+ raise AnsibleError("this role does not appear to have a meta/main.yml file.")
+ else:
+ try:
+ self._metadata = yaml_load(role_tar_file.extractfile(meta_file))
+ except Exception:
+ raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
+
+ paths = self.paths
+ if self.path != paths[0]:
+ # path can be passed through __init__
+ # FIXME should this be done in __init__?
+ paths.insert(0, self.path) # prepend the whole path, not its characters
+ paths_len = len(paths)
+ for idx, path in enumerate(paths):
+ self.path = path
+ display.display("- extracting %s to %s" % (self.name, self.path))
+ try:
+ if os.path.exists(self.path):
+ if not os.path.isdir(self.path):
+ raise AnsibleError("the specified roles path exists and is not a directory.")
+ elif not context.CLIARGS.get("force", False):
+ raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
+ else:
+ # using --force, remove the old path
+ if not self.remove():
+ raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really "
+ "want to put the role here." % self.path)
+ else:
+ os.makedirs(self.path)
+
+ # We strip off any higher-level directories for all of the files
+ # contained within the tar file here. The default is 'github_repo-target'.
+ # Gerrit instances, on the other hand, do not have a parent directory at all.
+ for member in members:
+ # we only extract files, and remove any relative path
+ # bits that might be in the file for security purposes
+ # and drop any containing directory, as mentioned above
+ if member.isreg() or member.issym():
+ n_member_name = to_native(member.name)
+ n_archive_parent_dir = to_native(archive_parent_dir)
+ n_parts = n_member_name.replace(n_archive_parent_dir, "", 1).split(os.sep)
+ n_final_parts = []
+ for n_part in n_parts:
+ # TODO if the condition triggers it produces a broken installation.
+ # It will create the parent directory as an empty file and will
+ # explode if the directory contains valid files.
+ # Leaving this as is since the whole module needs a rewrite.
+ if n_part != '..' and not n_part.startswith('~') and '$' not in n_part:
+ n_final_parts.append(n_part)
+ member.name = os.path.join(*n_final_parts)
+ role_tar_file.extract(member, to_native(self.path))
+
+ # write out the install info file for later use
+ self._write_galaxy_install_info()
+ break
+ except OSError as e:
+ if e.errno == errno.EACCES and idx < paths_len - 1:
+ continue
+ raise AnsibleError("Could not update files in %s: %s" % (self.path, to_native(e)))
+
+ # return the parsed yaml metadata
+ display.display("- %s was installed successfully" % str(self))
+ if not (self.src and os.path.isfile(self.src)):
+ try:
+ os.unlink(tmp_file)
+ except (OSError, IOError) as e:
+ display.warning(u"Unable to remove tmp file (%s): %s" % (tmp_file, to_text(e)))
+ return True
+
+ return False
+
+ @property
+ def spec(self):
+ """
+ Returns role spec info
+ {
+ 'scm': 'git',
+ 'src': 'http://git.example.com/repos/repo.git',
+ 'version': 'v1.0',
+ 'name': 'repo'
+ }
+ """
+ return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
+
+ @property
+ def requirements(self):
+ """
+ Returns role requirements
+ """
+ if self._requirements is None:
+ self._requirements = []
+ for meta_requirements in self.META_REQUIREMENTS:
+ meta_path = os.path.join(self.path, meta_requirements)
+ if os.path.isfile(meta_path):
+ try:
+ # A context manager avoids referencing an unbound handle if open() fails.
+ with open(meta_path, 'r') as f:
+ self._requirements = yaml_load(f)
+ except Exception:
+ display.vvvvv("Unable to load requirements for %s" % self.name)
+
+ break
+
+ if not isinstance(self._requirements, MutableSequence):
+ raise AnsibleParserError(f"Expected role dependencies to be a list. Role {self} has meta/requirements.yml {self._requirements}")
+
+ return self._requirements
diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py
new file mode 100644
index 0000000..4455fd0
--- /dev/null
+++ b/lib/ansible/galaxy/token.py
@@ -0,0 +1,187 @@
+########################################################################
+#
+# (C) 2015, Chris Houseknecht <chouse@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import os
+import json
+from stat import S_IRUSR, S_IWUSR
+
+from ansible import constants as C
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common.yaml import yaml_dump, yaml_load
+from ansible.module_utils.urls import open_url
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class NoTokenSentinel(object):
+ """ Represents an ansible.cfg server with not token defined (will ignore cmdline and GALAXY_TOKEN_PATH. """
+ def __new__(cls, *args, **kwargs):
+ return cls
+
+
+class KeycloakToken(object):
+ '''A token granted by a Keycloak server.
+
+ Like sso.redhat.com as used by cloud.redhat.com,
+ i.e. Automation Hub.'''
+
+ token_type = 'Bearer'
+
+ def __init__(self, access_token=None, auth_url=None, validate_certs=True, client_id=None):
+ self.access_token = access_token
+ self.auth_url = auth_url
+ self._token = None
+ self.validate_certs = validate_certs
+ self.client_id = client_id
+ if self.client_id is None:
+ self.client_id = 'cloud-services'
+
+ def _form_payload(self):
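+ # Builds the form-encoded body, e.g. (with the default client_id):
+ # 'grant_type=refresh_token&client_id=cloud-services&refresh_token=<offline token>'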
+ return 'grant_type=refresh_token&client_id=%s&refresh_token=%s' % (self.client_id,
+ self.access_token)
+
+ def get(self):
+ if self._token:
+ return self._token
+
+ # - build a request to POST to auth_url
+ # - body is form encoded
+ # - 'request_token' is the offline token stored in ansible.cfg
+ # - 'grant_type' is 'refresh_token'
+ # - 'client_id' is 'cloud-services'
+ # - should probably be based on the contents of the
+ # offline_ticket's JWT payload 'aud' (audience)
+ # or 'azp' (Authorized party - the party to which the ID Token was issued)
+ payload = self._form_payload()
+
+ resp = open_url(to_native(self.auth_url),
+ data=payload,
+ validate_certs=self.validate_certs,
+ method='POST',
+ http_agent=user_agent())
+
+ # TODO: handle auth errors
+
+ data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
+
+ # - extract 'access_token'
+ self._token = data.get('access_token')
+
+ return self._token
+
+ def headers(self):
+ headers = {}
+ headers['Authorization'] = '%s %s' % (self.token_type, self.get())
+ return headers
+
+
+class GalaxyToken(object):
+ '''Class for storing and retrieving the local Galaxy token.'''
+
+ token_type = 'Token'
+
+ def __init__(self, token=None):
+ self.b_file = to_bytes(C.GALAXY_TOKEN_PATH, errors='surrogate_or_strict')
+ # Done so the config file is only opened when set/get/save is called
+ self._config = None
+ self._token = token
+
+ @property
+ def config(self):
+ if self._config is None:
+ self._config = self._read()
+
+ # Prioritise the token passed into the constructor
+ if self._token:
+ self._config['token'] = None if self._token is NoTokenSentinel else self._token
+
+ return self._config
+
+ def _read(self):
+ action = 'Opened'
+ if not os.path.isfile(self.b_file):
+ # token file not found, create and chmod u+rw
+ open(self.b_file, 'w').close()
+ os.chmod(self.b_file, S_IRUSR | S_IWUSR) # owner has +rw
+ action = 'Created'
+
+ with open(self.b_file, 'r') as f:
+ config = yaml_load(f)
+
+ display.vvv('%s %s' % (action, to_text(self.b_file)))
+
+ if config and not isinstance(config, dict):
+ display.vvv('Galaxy token file %s malformed, unable to read it' % to_text(self.b_file))
+ return {}
+
+ return config or {}
+
+ def set(self, token):
+ self._token = token
+ self.save()
+
+ def get(self):
+ return self.config.get('token', None)
+
+ def save(self):
+ with open(self.b_file, 'w') as f:
+ yaml_dump(self.config, f, default_flow_style=False)
+
+ def headers(self):
+ headers = {}
+ token = self.get()
+ if token:
+ headers['Authorization'] = '%s %s' % (self.token_type, self.get())
+ return headers
+
+
+class BasicAuthToken(object):
+ token_type = 'Basic'
+
+ def __init__(self, username, password=None):
+ self.username = username
+ self.password = password
+ self._token = None
+
+ @staticmethod
+ def _encode_token(username, password):
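+ # e.g. _encode_token('user', 'pass') -> 'dXNlcjpwYXNz' (base64 of 'user:pass')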
+ token = "%s:%s" % (to_text(username, errors='surrogate_or_strict'),
+ to_text(password, errors='surrogate_or_strict', nonstring='passthru') or '')
+ b64_val = base64.b64encode(to_bytes(token, encoding='utf-8', errors='surrogate_or_strict'))
+ return to_text(b64_val)
+
+ def get(self):
+ if self._token:
+ return self._token
+
+ self._token = self._encode_token(self.username, self.password)
+
+ return self._token
+
+ def headers(self):
+ headers = {}
+ headers['Authorization'] = '%s %s' % (self.token_type, self.get())
+ return headers
diff --git a/lib/ansible/galaxy/user_agent.py b/lib/ansible/galaxy/user_agent.py
new file mode 100644
index 0000000..c860bcd
--- /dev/null
+++ b/lib/ansible/galaxy/user_agent.py
@@ -0,0 +1,23 @@
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import platform
+import sys
+
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+
+
+def user_agent():
+ """Returns a user agent used by ansible-galaxy to include the Ansible version, platform and python version."""
+
+ python_version = sys.version_info
+ return u"ansible-galaxy/{ansible_version} ({platform}; python:{py_major}.{py_minor}.{py_micro})".format(
+ ansible_version=ansible_version,
+ platform=platform.system(),
+ py_major=python_version.major,
+ py_minor=python_version.minor,
+ py_micro=python_version.micro,
+ )
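+
+
+# Illustrative output (actual values depend on the host and interpreter):
+# "ansible-galaxy/2.14.0 (Linux; python:3.10.8)"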
diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/inventory/__init__.py
diff --git a/lib/ansible/inventory/data.py b/lib/ansible/inventory/data.py
new file mode 100644
index 0000000..15a6420
--- /dev/null
+++ b/lib/ansible/inventory/data.py
@@ -0,0 +1,283 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.inventory.group import Group
+from ansible.inventory.host import Host
+from ansible.module_utils.six import string_types
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+from ansible.utils.path import basedir
+
+display = Display()
+
+
+class InventoryData(object):
+ """
+ Holds inventory data (host and group objects).
+    Using its methods should guarantee expected relationships and data.
+ """
+
+ def __init__(self):
+
+ self.groups = {}
+ self.hosts = {}
+
+ # provides 'groups' magic var, host object has group_names
+ self._groups_dict_cache = {}
+
+ # current localhost, implicit or explicit
+ self.localhost = None
+
+ self.current_source = None
+ self.processed_sources = []
+
+        # Always create the 'all' and 'ungrouped' groups.
+ for group in ('all', 'ungrouped'):
+ self.add_group(group)
+ self.add_child('all', 'ungrouped')
+
+ def serialize(self):
+ self._groups_dict_cache = None
+ data = {
+ 'groups': self.groups,
+ 'hosts': self.hosts,
+ 'local': self.localhost,
+ 'source': self.current_source,
+ 'processed_sources': self.processed_sources
+ }
+ return data
+
+ def deserialize(self, data):
+ self._groups_dict_cache = {}
+ self.hosts = data.get('hosts')
+ self.groups = data.get('groups')
+ self.localhost = data.get('local')
+ self.current_source = data.get('source')
+ self.processed_sources = data.get('processed_sources')
+
+ def _create_implicit_localhost(self, pattern):
+
+ if self.localhost:
+ new_host = self.localhost
+ else:
+ new_host = Host(pattern)
+
+ new_host.address = "127.0.0.1"
+ new_host.implicit = True
+
+ # set localhost defaults
+ py_interp = sys.executable
+ if not py_interp:
+                # sys.executable is not set in some corner cases. See issue #13585
+ py_interp = '/usr/bin/python'
+ display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python default. '
+ 'You can correct this by setting ansible_python_interpreter for localhost')
+ new_host.set_variable("ansible_python_interpreter", py_interp)
+ new_host.set_variable("ansible_connection", 'local')
+
+ self.localhost = new_host
+
+ return new_host
+
+ def reconcile_inventory(self):
+        ''' Ensure basic inventory rules; run after updates '''
+
+ display.debug('Reconcile groups and hosts in inventory.')
+ self.current_source = None
+
+ group_names = set()
+ # set group vars from group_vars/ files and vars plugins
+ for g in self.groups:
+ group = self.groups[g]
+ group_names.add(group.name)
+
+ # ensure all groups inherit from 'all'
+ if group.name != 'all' and not group.get_ancestors():
+ self.add_child('all', group.name)
+
+ host_names = set()
+ # get host vars from host_vars/ files and vars plugins
+ for host in self.hosts.values():
+ host_names.add(host.name)
+
+ mygroups = host.get_groups()
+
+ if self.groups['ungrouped'] in mygroups:
+ # clear ungrouped of any incorrectly stored by parser
+ if set(mygroups).difference(set([self.groups['all'], self.groups['ungrouped']])):
+ self.groups['ungrouped'].remove_host(host)
+
+ elif not host.implicit:
+ # add ungrouped hosts to ungrouped, except implicit
+ length = len(mygroups)
+ if length == 0 or (length == 1 and self.groups['all'] in mygroups):
+ self.add_child('ungrouped', host.name)
+
+ # special case for implicit hosts
+ if host.implicit:
+ host.vars = combine_vars(self.groups['all'].get_vars(), host.vars)
+
+ # warn if overloading identifier as both group and host
+ for conflict in group_names.intersection(host_names):
+ display.warning("Found both group and host with same name: %s" % conflict)
+
+ self._groups_dict_cache = {}
+
+ def get_host(self, hostname):
+        ''' fetch host object by name; deals with implicit localhost '''
+
+ matching_host = self.hosts.get(hostname, None)
+
+ # if host is not in hosts dict
+ if matching_host is None and hostname in C.LOCALHOST:
+ # might need to create implicit localhost
+ matching_host = self._create_implicit_localhost(hostname)
+
+ return matching_host
+
+ def add_group(self, group):
+        ''' adds a group to inventory if not there already, returns the name actually used '''
+
+ if group:
+ if not isinstance(group, string_types):
+ raise AnsibleError("Invalid group name supplied, expected a string but got %s for %s" % (type(group), group))
+ if group not in self.groups:
+ g = Group(group)
+ if g.name not in self.groups:
+ self.groups[g.name] = g
+ self._groups_dict_cache = {}
+ display.debug("Added group %s to inventory" % group)
+ group = g.name
+ else:
+ display.debug("group %s already in inventory" % group)
+ else:
+ raise AnsibleError("Invalid empty/false group name provided: %s" % group)
+
+ return group
+
+ def remove_group(self, group):
+
+ if group in self.groups:
+ del self.groups[group]
+ display.debug("Removed group %s from inventory" % group)
+ self._groups_dict_cache = {}
+
+ for host in self.hosts:
+ h = self.hosts[host]
+ h.remove_group(group)
+
+ def add_host(self, host, group=None, port=None):
+ ''' adds a host to inventory and possibly a group if not there already '''
+
+ if host:
+ if not isinstance(host, string_types):
+ raise AnsibleError("Invalid host name supplied, expected a string but got %s for %s" % (type(host), host))
+
+ # TODO: add to_safe_host_name
+ g = None
+ if group:
+ if group in self.groups:
+ g = self.groups[group]
+ else:
+ raise AnsibleError("Could not find group %s in inventory" % group)
+
+ if host not in self.hosts:
+ h = Host(host, port)
+ self.hosts[host] = h
+ if self.current_source: # set to 'first source' in which host was encountered
+ self.set_variable(host, 'inventory_file', self.current_source)
+ self.set_variable(host, 'inventory_dir', basedir(self.current_source))
+ else:
+ self.set_variable(host, 'inventory_file', None)
+ self.set_variable(host, 'inventory_dir', None)
+ display.debug("Added host %s to inventory" % (host))
+
+ # set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
+ if host in C.LOCALHOST:
+ if self.localhost is None:
+ self.localhost = self.hosts[host]
+ display.vvvv("Set default localhost to %s" % h)
+ else:
+ display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
+ else:
+ h = self.hosts[host]
+
+ if g:
+ g.add_host(h)
+ self._groups_dict_cache = {}
+ display.debug("Added host %s to group %s" % (host, group))
+ else:
+ raise AnsibleError("Invalid empty host name provided: %s" % host)
+
+ return host
+
+ def remove_host(self, host):
+
+ if host.name in self.hosts:
+ del self.hosts[host.name]
+
+ for group in self.groups:
+ g = self.groups[group]
+ g.remove_host(host)
+
+ def set_variable(self, entity, varname, value):
+ ''' sets a variable for an inventory object '''
+
+ if entity in self.groups:
+ inv_object = self.groups[entity]
+ elif entity in self.hosts:
+ inv_object = self.hosts[entity]
+ else:
+ raise AnsibleError("Could not identify group or host named %s" % entity)
+
+ inv_object.set_variable(varname, value)
+ display.debug('set %s for %s' % (varname, entity))
+
+ def add_child(self, group, child):
+ ''' Add host or group to group '''
+ added = False
+ if group in self.groups:
+ g = self.groups[group]
+ if child in self.groups:
+ added = g.add_child_group(self.groups[child])
+ elif child in self.hosts:
+ added = g.add_host(self.hosts[child])
+ else:
+ raise AnsibleError("%s is not a known host nor group" % child)
+ self._groups_dict_cache = {}
+ display.debug('Group %s now contains %s' % (group, child))
+ else:
+ raise AnsibleError("%s is not a known group" % group)
+ return added
+
+ def get_groups_dict(self):
+ """
+ We merge a 'magic' var 'groups' with group name keys and hostname list values into every host variable set. Cache for speed.
+ """
+ if not self._groups_dict_cache:
+ for (group_name, group) in self.groups.items():
+ self._groups_dict_cache[group_name] = [h.name for h in group.get_hosts()]
+
+ return self._groups_dict_cache
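+
+
+# A minimal sketch of driving InventoryData directly (names and port are
+# illustrative):
+#
+#   inv = InventoryData()                 # 'all' and 'ungrouped' exist already
+#   inv.add_group('web')
+#   inv.add_host('web01.example.com', group='web', port=2222)
+#   inv.reconcile_inventory()             # enforce 'all' ancestry rules
+#   inv.get_groups_dict()['web']
+#   # -> ['web01.example.com']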
diff --git a/lib/ansible/inventory/group.py b/lib/ansible/inventory/group.py
new file mode 100644
index 0000000..c7af685
--- /dev/null
+++ b/lib/ansible/inventory/group.py
@@ -0,0 +1,288 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections.abc import Mapping, MutableMapping
+from itertools import chain
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native, to_text
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+
+display = Display()
+
+
+def to_safe_group_name(name, replacer="_", force=False, silent=False):
+    # Converts 'bad' characters in a string to underscores (or the provided replacer) so it can be used as an Ansible host or group name
+
+ warn = ''
+ if name: # when deserializing we might not have name yet
+ invalid_chars = C.INVALID_VARIABLE_NAMES.findall(name)
+ if invalid_chars:
+ msg = 'invalid character(s) "%s" in group name (%s)' % (to_text(set(invalid_chars)), to_text(name))
+ if C.TRANSFORM_INVALID_GROUP_CHARS not in ('never', 'ignore') or force:
+ name = C.INVALID_VARIABLE_NAMES.sub(replacer, name)
+ if not (silent or C.TRANSFORM_INVALID_GROUP_CHARS == 'silently'):
+ display.vvvv('Replacing ' + msg)
+ warn = 'Invalid characters were found in group names and automatically replaced, use -vvvv to see details'
+ else:
+ if C.TRANSFORM_INVALID_GROUP_CHARS == 'never':
+ display.vvvv('Not replacing %s' % msg)
+ warn = 'Invalid characters were found in group names but not replaced, use -vvvv to see details'
+
+ if warn:
+ display.warning(warn)
+
+ return name
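+
+
+# Example (assuming the default C.INVALID_VARIABLE_NAMES pattern, which
+# rejects anything outside [A-Za-z0-9_]); force=True replaces regardless of
+# the TRANSFORM_INVALID_GROUP_CHARS setting:
+#
+#   to_safe_group_name('web-servers.prod', force=True)
+#   # -> 'web_servers_prod'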
+
+
+class Group:
+ ''' a group of ansible hosts '''
+
+ # __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
+
+ def __init__(self, name=None):
+
+ self.depth = 0
+ self.name = to_safe_group_name(name)
+ self.hosts = []
+ self._hosts = None
+ self.vars = {}
+ self.child_groups = []
+ self.parent_groups = []
+ self._hosts_cache = None
+ self.priority = 1
+
+ def __repr__(self):
+ return self.get_name()
+
+ def __str__(self):
+ return self.get_name()
+
+ def __getstate__(self):
+ return self.serialize()
+
+ def __setstate__(self, data):
+ return self.deserialize(data)
+
+ def serialize(self):
+ parent_groups = []
+ for parent in self.parent_groups:
+ parent_groups.append(parent.serialize())
+
+ self._hosts = None
+
+ result = dict(
+ name=self.name,
+ vars=self.vars.copy(),
+ parent_groups=parent_groups,
+ depth=self.depth,
+ hosts=self.hosts,
+ )
+
+ return result
+
+ def deserialize(self, data):
+ self.__init__() # used by __setstate__ to deserialize in place # pylint: disable=unnecessary-dunder-call
+ self.name = data.get('name')
+ self.vars = data.get('vars', dict())
+ self.depth = data.get('depth', 0)
+ self.hosts = data.get('hosts', [])
+ self._hosts = None
+
+ parent_groups = data.get('parent_groups', [])
+ for parent_data in parent_groups:
+ g = Group()
+ g.deserialize(parent_data)
+ self.parent_groups.append(g)
+
+ def _walk_relationship(self, rel, include_self=False, preserve_ordering=False):
+ '''
+ Given `rel` that is an iterable property of Group,
+        constituting a directed acyclic graph among all groups,
+        returns a set of all groups in the full tree.
+ A B C
+ | / | /
+ | / | /
+ D -> E
+ | / vertical connections
+ | / are directed upward
+ F
+ Called on F, returns set of (A, B, C, D, E)
+ '''
+ seen = set([])
+ unprocessed = set(getattr(self, rel))
+ if include_self:
+ unprocessed.add(self)
+ if preserve_ordering:
+ ordered = [self] if include_self else []
+ ordered.extend(getattr(self, rel))
+
+ while unprocessed:
+ seen.update(unprocessed)
+ new_unprocessed = set([])
+
+ for new_item in chain.from_iterable(getattr(g, rel) for g in unprocessed):
+ new_unprocessed.add(new_item)
+ if preserve_ordering:
+ if new_item not in seen:
+ ordered.append(new_item)
+
+ new_unprocessed.difference_update(seen)
+ unprocessed = new_unprocessed
+
+ if preserve_ordering:
+ return ordered
+ return seen
+
+ def get_ancestors(self):
+ return self._walk_relationship('parent_groups')
+
+ def get_descendants(self, **kwargs):
+ return self._walk_relationship('child_groups', **kwargs)
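+
+    # Example of the relationship walk above (group names are illustrative):
+    #
+    #   a = Group('a'); b = Group('b'); c = Group('c')
+    #   a.add_child_group(b)
+    #   b.add_child_group(c)
+    #   c.get_ancestors()    # -> {a, b}
+    #   a.get_descendants()  # -> {b, c}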
+
+ @property
+ def host_names(self):
+ if self._hosts is None:
+ self._hosts = set(self.hosts)
+ return self._hosts
+
+ def get_name(self):
+ return self.name
+
+ def add_child_group(self, group):
+ added = False
+ if self == group:
+ raise Exception("can't add group to itself")
+
+ # don't add if it's already there
+ if group not in self.child_groups:
+
+ # prepare list of group's new ancestors this edge creates
+ start_ancestors = group.get_ancestors()
+ new_ancestors = self.get_ancestors()
+ if group in new_ancestors:
+ raise AnsibleError("Adding group '%s' as child to '%s' creates a recursive dependency loop." % (to_native(group.name), to_native(self.name)))
+ new_ancestors.add(self)
+ new_ancestors.difference_update(start_ancestors)
+
+ added = True
+ self.child_groups.append(group)
+
+ # update the depth of the child
+ group.depth = max([self.depth + 1, group.depth])
+
+ # update the depth of the grandchildren
+ group._check_children_depth()
+
+ # now add self to child's parent_groups list, but only if there
+ # isn't already a group with the same name
+ if self.name not in [g.name for g in group.parent_groups]:
+ group.parent_groups.append(self)
+ for h in group.get_hosts():
+ h.populate_ancestors(additions=new_ancestors)
+
+ self.clear_hosts_cache()
+ return added
+
+ def _check_children_depth(self):
+
+ depth = self.depth
+ start_depth = self.depth # self.depth could change over loop
+ seen = set([])
+ unprocessed = set(self.child_groups)
+
+ while unprocessed:
+ seen.update(unprocessed)
+ depth += 1
+ to_process = unprocessed.copy()
+ unprocessed = set([])
+ for g in to_process:
+ if g.depth < depth:
+ g.depth = depth
+ unprocessed.update(g.child_groups)
+ if depth - start_depth > len(seen):
+ raise AnsibleError("The group named '%s' has a recursive dependency loop." % to_native(self.name))
+
+ def add_host(self, host):
+ added = False
+ if host.name not in self.host_names:
+ self.hosts.append(host)
+ self._hosts.add(host.name)
+ host.add_group(self)
+ self.clear_hosts_cache()
+ added = True
+ return added
+
+ def remove_host(self, host):
+ removed = False
+ if host.name in self.host_names:
+ self.hosts.remove(host)
+ self._hosts.remove(host.name)
+ host.remove_group(self)
+ self.clear_hosts_cache()
+ removed = True
+ return removed
+
+ def set_variable(self, key, value):
+
+ if key == 'ansible_group_priority':
+            self.set_priority(value)  # set_priority handles conversion and invalid values
+ else:
+ if key in self.vars and isinstance(self.vars[key], MutableMapping) and isinstance(value, Mapping):
+ self.vars = combine_vars(self.vars, {key: value})
+ else:
+ self.vars[key] = value
+
+ def clear_hosts_cache(self):
+
+ self._hosts_cache = None
+ for g in self.get_ancestors():
+ g._hosts_cache = None
+
+ def get_hosts(self):
+
+ if self._hosts_cache is None:
+ self._hosts_cache = self._get_hosts()
+ return self._hosts_cache
+
+ def _get_hosts(self):
+
+ hosts = []
+ seen = {}
+ for kid in self.get_descendants(include_self=True, preserve_ordering=True):
+ kid_hosts = kid.hosts
+ for kk in kid_hosts:
+ if kk not in seen:
+ seen[kk] = 1
+ if self.name == 'all' and kk.implicit:
+ continue
+ hosts.append(kk)
+ return hosts
+
+ def get_vars(self):
+ return self.vars.copy()
+
+ def set_priority(self, priority):
+ try:
+ self.priority = int(priority)
+        except (TypeError, ValueError):
+ # FIXME: warn about invalid priority
+ pass
diff --git a/lib/ansible/inventory/helpers.py b/lib/ansible/inventory/helpers.py
new file mode 100644
index 0000000..39c7221
--- /dev/null
+++ b/lib/ansible/inventory/helpers.py
@@ -0,0 +1,40 @@
+# (c) 2017, Ansible by RedHat Inc,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.utils.vars import combine_vars
+
+
+def sort_groups(groups):
+ return sorted(groups, key=lambda g: (g.depth, g.priority, g.name))
+
+
+def get_group_vars(groups):
+ """
+ Combine all the group vars from a list of inventory groups.
+
+ :param groups: list of ansible.inventory.group.Group objects
+ :rtype: dict
+ """
+ results = {}
+ for group in sort_groups(groups):
+ results = combine_vars(results, group.get_vars())
+
+ return results
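+
+
+# A small sketch of the resulting precedence (hypothetical groups, default
+# 'replace' hash behaviour): sort_groups() orders shallower groups first, so
+# combine_vars lets the deepest group win on conflicts:
+#
+#   parent.set_variable('x', 1)   # depth 1
+#   child.set_variable('x', 2)    # depth 2
+#   get_group_vars([parent, child])
+#   # -> {'x': 2}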
diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py
new file mode 100644
index 0000000..18569ce
--- /dev/null
+++ b/lib/ansible/inventory/host.py
@@ -0,0 +1,169 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections.abc import Mapping, MutableMapping
+
+from ansible.inventory.group import Group
+from ansible.parsing.utils.addresses import patterns
+from ansible.utils.vars import combine_vars, get_unique_id
+
+
+__all__ = ['Host']
+
+
+class Host:
+ ''' a single ansible host '''
+
+ # __slots__ = [ 'name', 'vars', 'groups' ]
+
+ def __getstate__(self):
+ return self.serialize()
+
+ def __setstate__(self, data):
+ return self.deserialize(data)
+
+ def __eq__(self, other):
+ if not isinstance(other, Host):
+ return False
+ return self._uuid == other._uuid
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return hash(self.name)
+
+ def __str__(self):
+ return self.get_name()
+
+ def __repr__(self):
+ return self.get_name()
+
+ def serialize(self):
+ groups = []
+ for group in self.groups:
+ groups.append(group.serialize())
+
+ return dict(
+ name=self.name,
+ vars=self.vars.copy(),
+ address=self.address,
+ uuid=self._uuid,
+ groups=groups,
+ implicit=self.implicit,
+ )
+
+ def deserialize(self, data):
+ self.__init__(gen_uuid=False) # used by __setstate__ to deserialize in place # pylint: disable=unnecessary-dunder-call
+
+ self.name = data.get('name')
+ self.vars = data.get('vars', dict())
+ self.address = data.get('address', '')
+ self._uuid = data.get('uuid', None)
+ self.implicit = data.get('implicit', False)
+
+ groups = data.get('groups', [])
+ for group_data in groups:
+ g = Group()
+ g.deserialize(group_data)
+ self.groups.append(g)
+
+ def __init__(self, name=None, port=None, gen_uuid=True):
+
+ self.vars = {}
+ self.groups = []
+ self._uuid = None
+
+ self.name = name
+ self.address = name
+
+ if port:
+ self.set_variable('ansible_port', int(port))
+
+ if gen_uuid:
+ self._uuid = get_unique_id()
+ self.implicit = False
+
+ def get_name(self):
+ return self.name
+
+ def populate_ancestors(self, additions=None):
+ # populate ancestors
+ if additions is None:
+ for group in self.groups:
+ self.add_group(group)
+ else:
+ for group in additions:
+ if group not in self.groups:
+ self.groups.append(group)
+
+ def add_group(self, group):
+ added = False
+ # populate ancestors first
+ for oldg in group.get_ancestors():
+ if oldg not in self.groups:
+ self.groups.append(oldg)
+
+ # actually add group
+ if group not in self.groups:
+ self.groups.append(group)
+ added = True
+ return added
+
+ def remove_group(self, group):
+ removed = False
+ if group in self.groups:
+ self.groups.remove(group)
+ removed = True
+
+        # remove exclusive ancestors, except 'all'!
+ for oldg in group.get_ancestors():
+ if oldg.name != 'all':
+ for childg in self.groups:
+ if oldg in childg.get_ancestors():
+ break
+ else:
+ self.remove_group(oldg)
+ return removed
+
+ def set_variable(self, key, value):
+ if key in self.vars and isinstance(self.vars[key], MutableMapping) and isinstance(value, Mapping):
+ self.vars = combine_vars(self.vars, {key: value})
+ else:
+ self.vars[key] = value
+
+ def get_groups(self):
+ return self.groups
+
+ def get_magic_vars(self):
+ results = {}
+ results['inventory_hostname'] = self.name
+ if patterns['ipv4'].match(self.name) or patterns['ipv6'].match(self.name):
+ results['inventory_hostname_short'] = self.name
+ else:
+ results['inventory_hostname_short'] = self.name.split('.')[0]
+
+ results['group_names'] = sorted([g.name for g in self.get_groups() if g.name != 'all'])
+
+ return results
+
+ def get_vars(self):
+ return combine_vars(self.vars, self.get_magic_vars())
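+
+
+# A minimal sketch of Host in isolation (values illustrative):
+#
+#   h = Host('db01.example.com', port=5432)
+#   h.get_magic_vars()['inventory_hostname_short']   # -> 'db01'
+#   h.get_vars()['ansible_port']                     # -> 5432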
diff --git a/lib/ansible/inventory/manager.py b/lib/ansible/inventory/manager.py
new file mode 100644
index 0000000..400bc6b
--- /dev/null
+++ b/lib/ansible/inventory/manager.py
@@ -0,0 +1,752 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import fnmatch
+import os
+import sys
+import re
+import itertools
+import traceback
+
+from operator import attrgetter
+from random import shuffle
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
+from ansible.inventory.data import InventoryData
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.parsing.utils.addresses import parse_address
+from ansible.plugins.loader import inventory_loader
+from ansible.utils.helpers import deduplicate_list
+from ansible.utils.path import unfrackpath
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+from ansible.vars.plugins import get_vars_from_inventory_sources
+
+display = Display()
+
+IGNORED_ALWAYS = [br"^\.", b"^host_vars$", b"^group_vars$", b"^vars_plugins$"]
+IGNORED_PATTERNS = [to_bytes(x) for x in C.INVENTORY_IGNORE_PATTERNS]
+IGNORED_EXTS = [b'%s$' % to_bytes(re.escape(x)) for x in C.INVENTORY_IGNORE_EXTS]
+
+IGNORED = re.compile(b'|'.join(IGNORED_ALWAYS + IGNORED_PATTERNS + IGNORED_EXTS))
+
+PATTERN_WITH_SUBSCRIPT = re.compile(
+ r'''^
+ (.+) # A pattern expression ending with...
+ \[(?: # A [subscript] expression comprising:
+ (-?[0-9]+)| # A single positive or negative number
+ ([0-9]+)([:-]) # Or an x:y or x: range.
+ ([0-9]*)
+ )\]
+ $
+ ''', re.X
+)
+
+
+def order_patterns(patterns):
+ ''' takes a list of patterns and reorders them by modifier to apply them consistently '''
+
+ # FIXME: this goes away if we apply patterns incrementally or by groups
+ pattern_regular = []
+ pattern_intersection = []
+ pattern_exclude = []
+ for p in patterns:
+ if not p:
+ continue
+
+ if p[0] == "!":
+ pattern_exclude.append(p)
+ elif p[0] == "&":
+ pattern_intersection.append(p)
+ else:
+ pattern_regular.append(p)
+
+    # if no regular pattern was given (hence only exclude and/or intersection
+    # patterns), match against 'all' so those modifiers still apply
+ if pattern_regular == []:
+ pattern_regular = ['all']
+
+ # when applying the host selectors, run those without the "&" or "!"
+ # first, then the &s, then the !s.
+ return pattern_regular + pattern_intersection + pattern_exclude
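+
+
+# Example of the reordering (patterns are illustrative):
+#
+#   order_patterns(['!db', 'web', '&staging'])
+#   # -> ['web', '&staging', '!db']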
+
+
+def split_host_pattern(pattern):
+ """
+ Takes a string containing host patterns separated by commas (or a list
+ thereof) and returns a list of single patterns (which may not contain
+ commas). Whitespace is ignored.
+
+ Also accepts ':' as a separator for backwards compatibility, but it is
+ not recommended due to the conflict with IPv6 addresses and host ranges.
+
+ Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd']
+ """
+
+ if isinstance(pattern, list):
+ results = (split_host_pattern(p) for p in pattern)
+ # flatten the results
+ return list(itertools.chain.from_iterable(results))
+ elif not isinstance(pattern, string_types):
+ pattern = to_text(pattern, errors='surrogate_or_strict')
+
+ # If it's got commas in it, we'll treat it as a straightforward
+ # comma-separated list of patterns.
+ if u',' in pattern:
+ patterns = pattern.split(u',')
+
+ # If it doesn't, it could still be a single pattern. This accounts for
+ # non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
+ else:
+ try:
+ (base, port) = parse_address(pattern, allow_ranges=True)
+ patterns = [pattern]
+ except Exception:
+ # The only other case we accept is a ':'-separated list of patterns.
+ # This mishandles IPv6 addresses, and is retained only for backwards
+ # compatibility.
+ patterns = re.findall(
+ to_text(r'''(?: # We want to match something comprising:
+ [^\s:\[\]] # (anything other than whitespace or ':[]'
+ | # ...or...
+ \[[^\]]*\] # a single complete bracketed expression)
+ )+ # occurring once or more
+ '''), pattern, re.X
+ )
+
+ return [p.strip() for p in patterns if p.strip()]
+
+
+class InventoryManager(object):
+ ''' Creates and manages inventory '''
+
+ def __init__(self, loader, sources=None, parse=True, cache=True):
+
+ # base objects
+ self._loader = loader
+ self._inventory = InventoryData()
+
+        # a list of host names used to restrict current inquiries
+ self._restriction = None
+ self._subset = None
+
+ # caches
+ self._hosts_patterns_cache = {} # resolved full patterns
+ self._pattern_cache = {} # resolved individual patterns
+
+ # the inventory dirs, files, script paths or lists of hosts
+ if sources is None:
+ self._sources = []
+ elif isinstance(sources, string_types):
+ self._sources = [sources]
+ else:
+ self._sources = sources
+
+ # get to work!
+ if parse:
+ self.parse_sources(cache=cache)
+
+ self._cached_dynamic_hosts = []
+ self._cached_dynamic_grouping = []
+
+ @property
+ def localhost(self):
+ return self._inventory.get_host('localhost')
+
+ @property
+ def groups(self):
+ return self._inventory.groups
+
+ @property
+ def hosts(self):
+ return self._inventory.hosts
+
+ def add_host(self, host, group=None, port=None):
+ return self._inventory.add_host(host, group, port)
+
+ def add_group(self, group):
+ return self._inventory.add_group(group)
+
+ def get_groups_dict(self):
+ return self._inventory.get_groups_dict()
+
+ def reconcile_inventory(self):
+ self.clear_caches()
+ return self._inventory.reconcile_inventory()
+
+ def get_host(self, hostname):
+ return self._inventory.get_host(hostname)
+
+ def _fetch_inventory_plugins(self):
+ ''' sets up loaded inventory plugins for usage '''
+
+ display.vvvv('setting up inventory plugins')
+
+ plugins = []
+ for name in C.INVENTORY_ENABLED:
+ plugin = inventory_loader.get(name)
+ if plugin:
+ plugins.append(plugin)
+ else:
+ display.warning('Failed to load inventory plugin, skipping %s' % name)
+
+ if not plugins:
+ raise AnsibleError("No inventory plugins available to generate inventory, make sure you have at least one enabled.")
+
+ return plugins
+
+ def parse_sources(self, cache=False):
+ ''' iterate over inventory sources and parse each one to populate it'''
+
+ parsed = False
+ # allow for multiple inventory parsing
+ for source in self._sources:
+
+ if source:
+ if ',' not in source:
+ source = unfrackpath(source, follow=False)
+ parse = self.parse_source(source, cache=cache)
+ if parse and not parsed:
+ parsed = True
+
+ if parsed:
+ # do post processing
+ self._inventory.reconcile_inventory()
+ else:
+ if C.INVENTORY_UNPARSED_IS_FAILED:
+ raise AnsibleError("No inventory was parsed, please check your configuration and options.")
+ elif C.INVENTORY_UNPARSED_WARNING:
+ display.warning("No inventory was parsed, only implicit localhost is available")
+
+ for group in self.groups.values():
+ group.vars = combine_vars(group.vars, get_vars_from_inventory_sources(self._loader, self._sources, [group], 'inventory'))
+ for host in self.hosts.values():
+ host.vars = combine_vars(host.vars, get_vars_from_inventory_sources(self._loader, self._sources, [host], 'inventory'))
+
+ def parse_source(self, source, cache=False):
+ ''' Generate or update inventory for the source provided '''
+
+ parsed = False
+ failures = []
+ display.debug(u'Examining possible inventory source: %s' % source)
+
+ # use binary for path functions
+ b_source = to_bytes(source)
+
+ # process directories as a collection of inventories
+ if os.path.isdir(b_source):
+ display.debug(u'Searching for inventory files in directory: %s' % source)
+ for i in sorted(os.listdir(b_source)):
+
+ display.debug(u'Considering %s' % i)
+ # Skip hidden files and stuff we explicitly ignore
+ if IGNORED.search(i):
+ continue
+
+ # recursively deal with directory entries
+ fullpath = to_text(os.path.join(b_source, i), errors='surrogate_or_strict')
+ parsed_this_one = self.parse_source(fullpath, cache=cache)
+ display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one))
+ if not parsed:
+ parsed = parsed_this_one
+ else:
+ # left with strings or files, let plugins figure it out
+
+ # set so new hosts can use for inventory_file/dir vars
+ self._inventory.current_source = source
+
+ # try source with each plugin
+ for plugin in self._fetch_inventory_plugins():
+
+ plugin_name = to_text(getattr(plugin, '_load_name', getattr(plugin, '_original_path', '')))
+ display.debug(u'Attempting to use plugin %s (%s)' % (plugin_name, plugin._original_path))
+
+ # initialize and figure out if plugin wants to attempt parsing this file
+ try:
+ plugin_wants = bool(plugin.verify_file(source))
+ except Exception:
+ plugin_wants = False
+
+ if plugin_wants:
+ try:
+ # FIXME in case plugin fails 1/2 way we have partial inventory
+ plugin.parse(self._inventory, self._loader, source, cache=cache)
+ try:
+ plugin.update_cache_if_changed()
+ except AttributeError:
+ # some plugins might not implement caching
+ pass
+ parsed = True
+ display.vvv('Parsed %s inventory source with %s plugin' % (source, plugin_name))
+ break
+ except AnsibleParserError as e:
+ display.debug('%s was not parsable by %s' % (source, plugin_name))
+ tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
+ failures.append({'src': source, 'plugin': plugin_name, 'exc': e, 'tb': tb})
+ except Exception as e:
+ display.debug('%s failed while attempting to parse %s' % (plugin_name, source))
+ tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
+ failures.append({'src': source, 'plugin': plugin_name, 'exc': AnsibleError(e), 'tb': tb})
+ else:
+ display.vvv("%s declined parsing %s as it did not pass its verify_file() method" % (plugin_name, source))
+
+ if parsed:
+ self._inventory.processed_sources.append(self._inventory.current_source)
+ else:
+            # only warn/error if NOT using the default, or if using the default and the file is present
+ # TODO: handle 'non file' inventory and detect vs hardcode default
+ if source != '/etc/ansible/hosts' or os.path.exists(source):
+
+ if failures:
+ # only if no plugin processed files should we show errors.
+ for fail in failures:
+ display.warning(u'\n* Failed to parse %s with %s plugin: %s' % (to_text(fail['src']), fail['plugin'], to_text(fail['exc'])))
+ if 'tb' in fail:
+ display.vvv(to_text(fail['tb']))
+
+ # final error/warning on inventory source failure
+ if C.INVENTORY_ANY_UNPARSED_IS_FAILED:
+ raise AnsibleError(u'Completely failed to parse inventory source %s' % (source))
+ else:
+ display.warning("Unable to parse %s as an inventory source" % source)
+
+        # clean up, just in case
+ self._inventory.current_source = None
+
+ return parsed
+
+ def clear_caches(self):
+ ''' clear all caches '''
+ self._hosts_patterns_cache = {}
+ self._pattern_cache = {}
+
+ def refresh_inventory(self):
+ ''' recalculate inventory '''
+
+ self.clear_caches()
+ self._inventory = InventoryData()
+ self.parse_sources(cache=False)
+ for host in self._cached_dynamic_hosts:
+ self.add_dynamic_host(host, {'refresh': True})
+ for host, result in self._cached_dynamic_grouping:
+ result['refresh'] = True
+ self.add_dynamic_group(host, result)
+
+ def _match_list(self, items, pattern_str):
+ # compile patterns
+ try:
+            if pattern_str[0] != '~':
+ pattern = re.compile(fnmatch.translate(pattern_str))
+ else:
+ pattern = re.compile(pattern_str[1:])
+ except Exception:
+ raise AnsibleError('Invalid host list pattern: %s' % pattern_str)
+
+ # apply patterns
+ results = []
+ for item in items:
+ if pattern.match(item):
+ results.append(item)
+ return results
+
+ def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False, order=None):
+ """
+ Takes a pattern or list of patterns and returns a list of matching
+ inventory host names, taking into account any active restrictions
+ or applied subsets
+ """
+
+ hosts = []
+
+ # Check if pattern already computed
+ if isinstance(pattern, list):
+ pattern_list = pattern[:]
+ else:
+ pattern_list = [pattern]
+
+ if pattern_list:
+ if not ignore_limits and self._subset:
+ pattern_list.extend(self._subset)
+
+ if not ignore_restrictions and self._restriction:
+ pattern_list.extend(self._restriction)
+
+ # This is only used as a hash key in the self._hosts_patterns_cache dict
+ # a tuple is faster than stringifying
+ pattern_hash = tuple(pattern_list)
+
+ if pattern_hash not in self._hosts_patterns_cache:
+
+ patterns = split_host_pattern(pattern)
+ hosts = self._evaluate_patterns(patterns)
+
+ # mainly useful for hostvars[host] access
+ if not ignore_limits and self._subset:
+ # exclude hosts not in a subset, if defined
+ subset_uuids = set(s._uuid for s in self._evaluate_patterns(self._subset))
+ hosts = [h for h in hosts if h._uuid in subset_uuids]
+
+ if not ignore_restrictions and self._restriction:
+ # exclude hosts mentioned in any restriction (ex: failed hosts)
+ hosts = [h for h in hosts if h.name in self._restriction]
+
+ self._hosts_patterns_cache[pattern_hash] = deduplicate_list(hosts)
+
+ # sort hosts list if needed (should only happen when called from strategy)
+ if order in ['sorted', 'reverse_sorted']:
+ hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], key=attrgetter('name'), reverse=(order == 'reverse_sorted'))
+ elif order == 'reverse_inventory':
+ hosts = self._hosts_patterns_cache[pattern_hash][::-1]
+ else:
+ hosts = self._hosts_patterns_cache[pattern_hash][:]
+ if order == 'shuffle':
+ shuffle(hosts)
+ elif order not in [None, 'inventory']:
+ raise AnsibleOptionsError("Invalid 'order' specified for inventory hosts: %s" % order)
+
+ return hosts
+
+ def _evaluate_patterns(self, patterns):
+ """
+ Takes a list of patterns and returns a list of matching host names,
+ taking into account any negative and intersection patterns.
+ """
+
+ patterns = order_patterns(patterns)
+ hosts = []
+
+ for p in patterns:
+ # avoid resolving a pattern that is a plain host
+ if p in self._inventory.hosts:
+ hosts.append(self._inventory.get_host(p))
+ else:
+ that = self._match_one_pattern(p)
+ if p[0] == "!":
+ that = set(that)
+ hosts = [h for h in hosts if h not in that]
+ elif p[0] == "&":
+ that = set(that)
+ hosts = [h for h in hosts if h in that]
+ else:
+ existing_hosts = set(y.name for y in hosts)
+ hosts.extend([h for h in that if h.name not in existing_hosts])
+ return hosts
+
+ def _match_one_pattern(self, pattern):
+ """
+ Takes a single pattern and returns a list of matching host names.
+ Ignores intersection (&) and exclusion (!) specifiers.
+
+ The pattern may be:
+
+ 1. A regex starting with ~, e.g. '~[abc]*'
+ 2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*'
+ 3. An ordinary word that matches itself only, e.g. 'foo'
+
+ The pattern is matched using the following rules:
+
+ 1. If it's 'all', it matches all hosts in all groups.
+ 2. Otherwise, for each known group name:
+ (a) if it matches the group name, the results include all hosts
+ in the group or any of its children.
+ (b) otherwise, if it matches any hosts in the group, the results
+ include the matching hosts.
+
+ This means that 'foo*' may match one or more groups (thus including all
+ hosts therein) but also hosts in other groups.
+
+ The built-in groups 'all' and 'ungrouped' are special. No pattern can
+ match these group names (though 'all' behaves as though it matches, as
+ described above). The word 'ungrouped' can match a host of that name,
+ and patterns like 'ungr*' and 'al*' can match either hosts or groups
+ other than all and ungrouped.
+
+ If the pattern matches one or more group names according to these rules,
+ it may have an optional range suffix to select a subset of the results.
+ This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does
+ not work (the [1] is interpreted as part of the regex), but 'foo*[1]'
+ would work if 'foo*' matched the name of one or more groups.
+
+ Duplicate matches are always eliminated from the results.
+ """
+
+ if pattern[0] in ("&", "!"):
+ pattern = pattern[1:]
+
+ if pattern not in self._pattern_cache:
+ (expr, slice) = self._split_subscript(pattern)
+ hosts = self._enumerate_matches(expr)
+ try:
+ hosts = self._apply_subscript(hosts, slice)
+ except IndexError:
+ raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern)
+ self._pattern_cache[pattern] = hosts
+
+ return self._pattern_cache[pattern]
+
+ def _split_subscript(self, pattern):
+ """
+ Takes a pattern, checks if it has a subscript, and returns the pattern
+ without the subscript and a (start,end) tuple representing the given
+ subscript (or None if there is no subscript).
+
+ Validates that the subscript is in the right syntax, but doesn't make
+ sure the actual indices make sense in context.
+ """
+
+ # Do not parse regexes for enumeration info
+ if pattern[0] == '~':
+ return (pattern, None)
+
+ # We want a pattern followed by an integer or range subscript.
+ # (We can't be more restrictive about the expression because the
+ # fnmatch semantics permit [\[:\]] to occur.)
+
+ subscript = None
+ m = PATTERN_WITH_SUBSCRIPT.match(pattern)
+ if m:
+ (pattern, idx, start, sep, end) = m.groups()
+ if idx:
+ subscript = (int(idx), None)
+ else:
+ if not end:
+ end = -1
+ subscript = (int(start), int(end))
+ if sep == '-':
+ display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed")
+
+ return (pattern, subscript)
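+
+    # Examples of the subscript split (patterns are illustrative):
+    #
+    #   _split_subscript('web[0]')     # -> ('web', (0, None))
+    #   _split_subscript('web[2:5]')   # -> ('web', (2, 5))
+    #   _split_subscript('web[1:]')    # -> ('web', (1, -1))
+    #   _split_subscript('~web[0-9]')  # -> ('~web[0-9]', None)  # regexes pass through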
+
+ def _apply_subscript(self, hosts, subscript):
+ """
+ Takes a list of hosts and a (start,end) tuple and returns the subset of
+ hosts based on the subscript (which may be None to return all hosts).
+ """
+
+ if not hosts or not subscript:
+ return hosts
+
+ (start, end) = subscript
+
+ if end:
+ if end == -1:
+ end = len(hosts) - 1
+ return hosts[start:end + 1]
+ else:
+ return [hosts[start]]
+
+ def _enumerate_matches(self, pattern):
+ """
+ Returns a list of host names matching the given pattern according to the
+ rules explained above in _match_one_pattern.
+ """
+
+ results = []
+ # check if pattern matches group
+ matching_groups = self._match_list(self._inventory.groups, pattern)
+ if matching_groups:
+ for groupname in matching_groups:
+ results.extend(self._inventory.groups[groupname].get_hosts())
+
+ # check hosts if no groups matched or it is a regex/glob pattern
+ if not matching_groups or pattern[0] == '~' or any(special in pattern for special in ('.', '?', '*', '[')):
+ # pattern might match host
+ matching_hosts = self._match_list(self._inventory.hosts, pattern)
+ if matching_hosts:
+ for hostname in matching_hosts:
+ results.append(self._inventory.hosts[hostname])
+
+ if not results and pattern in C.LOCALHOST:
+ # get_host autocreates implicit when needed
+ implicit = self._inventory.get_host(pattern)
+ if implicit:
+ results.append(implicit)
+
+ # Display warning if specified host pattern did not match any groups or hosts
+ if not results and not matching_groups and pattern != 'all':
+ msg = "Could not match supplied host pattern, ignoring: %s" % pattern
+ display.debug(msg)
+ if C.HOST_PATTERN_MISMATCH == 'warning':
+ display.warning(msg)
+ elif C.HOST_PATTERN_MISMATCH == 'error':
+ raise AnsibleError(msg)
+ # no need to write 'ignore' state
+
+ return results
+
+ def list_hosts(self, pattern="all"):
+ """ return a list of hostnames for a pattern """
+ # FIXME: cache?
+ result = self.get_hosts(pattern)
+
+ # allow implicit localhost if pattern matches and no other results
+ if len(result) == 0 and pattern in C.LOCALHOST:
+ result = [pattern]
+
+ return result
+
+ def list_groups(self):
+ # FIXME: cache?
+ return sorted(self._inventory.groups.keys())
+
+ def restrict_to_hosts(self, restriction):
+ """
+        Restrict list operations to the hosts given in restriction. This is used
+        to batch serial operations in main playbook code; don't use this for
+        other purposes.
+ """
+ if restriction is None:
+ return
+ elif not isinstance(restriction, list):
+ restriction = [restriction]
+ self._restriction = set(to_text(h.name) for h in restriction)
+
+ def subset(self, subset_pattern):
+ """
+ Limits inventory results to a subset of inventory that matches a given
+        pattern, such as to select a given geographic or numeric slice from
+        a previous 'hosts' selection that only selects roles, or vice versa.
+        Corresponds to the --limit parameter of ansible-playbook.
+ """
+ if subset_pattern is None:
+ self._subset = None
+ else:
+ subset_patterns = split_host_pattern(subset_pattern)
+ results = []
+ # allow Unix style @filename data
+ for x in subset_patterns:
+ if not x:
+ continue
+
+ if x[0] == "@":
+ b_limit_file = to_bytes(x[1:])
+ if not os.path.exists(b_limit_file):
+ raise AnsibleError(u'Unable to find limit file %s' % b_limit_file)
+ if not os.path.isfile(b_limit_file):
+ raise AnsibleError(u'Limit starting with "@" must be a file, not a directory: %s' % b_limit_file)
+ with open(b_limit_file) as fd:
+ results.extend([to_text(l.strip()) for l in fd.read().split("\n")])
+ else:
+ results.append(to_text(x))
+ self._subset = results
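+
+    # Usage sketch mirroring `ansible-playbook --limit` (instance, patterns and
+    # path are illustrative); each call replaces any previous subset:
+    #
+    #   im.subset('web*,&staging')      # pattern-based limit
+    #   im.subset('@/tmp/retry_hosts')  # one pattern per line from a file
+    #   im.get_hosts('all')             # now honours the active subset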
+
+ def remove_restriction(self):
+ """ Do not restrict list operations """
+ self._restriction = None
+
+ def clear_pattern_cache(self):
+ self._pattern_cache = {}
+
+ def add_dynamic_host(self, host_info, result_item):
+ '''
+ Helper function to add a new host to inventory based on a task result.
+ '''
+
+ changed = False
+ if not result_item.get('refresh'):
+ self._cached_dynamic_hosts.append(host_info)
+
+ if host_info:
+ host_name = host_info.get('host_name')
+
+ # Check if host in inventory, add if not
+ if host_name not in self.hosts:
+ self.add_host(host_name, 'all')
+ changed = True
+ new_host = self.hosts.get(host_name)
+
+ # Set/update the vars for this host
+ new_host_vars = new_host.get_vars()
+ new_host_combined_vars = combine_vars(new_host_vars, host_info.get('host_vars', dict()))
+ if new_host_vars != new_host_combined_vars:
+ new_host.vars = new_host_combined_vars
+ changed = True
+
+ new_groups = host_info.get('groups', [])
+ for group_name in new_groups:
+ if group_name not in self.groups:
+ group_name = self._inventory.add_group(group_name)
+ changed = True
+ new_group = self.groups[group_name]
+ if new_group.add_host(self.hosts[host_name]):
+ changed = True
+
+ # reconcile inventory, ensures inventory rules are followed
+ if changed:
+ self.reconcile_inventory()
+
+ result_item['changed'] = changed
+
+ def add_dynamic_group(self, host, result_item):
+ '''
+ Helper function to add a group (if it does not exist), and to assign the
+ specified host to that group.
+ '''
+
+ changed = False
+
+ if not result_item.get('refresh'):
+ self._cached_dynamic_grouping.append((host, result_item))
+
+ # the host here is from the executor side, which means it was a
+ # serialized/cloned copy and we'll need to look up the proper
+ # host object from the master inventory
+ real_host = self.hosts.get(host.name)
+ if real_host is None:
+ if host.name == self.localhost.name:
+ real_host = self.localhost
+ elif not result_item.get('refresh'):
+ raise AnsibleError('%s cannot be matched in inventory' % host.name)
+ else:
+                # host was removed from inventory during refresh; we should not process it
+ return
+
+ group_name = result_item.get('add_group')
+ parent_group_names = result_item.get('parent_groups', [])
+
+ if group_name not in self.groups:
+ group_name = self.add_group(group_name)
+
+ for name in parent_group_names:
+ if name not in self.groups:
+ # create the new group and add it to inventory
+ self.add_group(name)
+ changed = True
+
+ group = self._inventory.groups[group_name]
+ for parent_group_name in parent_group_names:
+ parent_group = self.groups[parent_group_name]
+ new = parent_group.add_child_group(group)
+ if new and not changed:
+ changed = True
+
+ if real_host not in group.get_hosts():
+ changed = group.add_host(real_host)
+
+ if group not in real_host.get_groups():
+ changed = real_host.add_group(group)
+
+ if changed:
+ self.reconcile_inventory()
+
+ result_item['changed'] = changed
diff --git a/lib/ansible/keyword_desc.yml b/lib/ansible/keyword_desc.yml
new file mode 100644
index 0000000..1e8d844
--- /dev/null
+++ b/lib/ansible/keyword_desc.yml
@@ -0,0 +1,70 @@
+accelerate: "*DEPRECATED*, set to True to use accelerate connection plugin."
+accelerate_ipv6: "*DEPRECATED*, set to True to force accelerate plugin to use ipv6 for its connection."
+accelerate_port: "*DEPRECATED*, set to override default port use for accelerate connection."
+action: "The 'action' to execute for a task, it normally translates into a C(module) or action plugin."
+args: "A secondary way to add arguments into a task. Takes a dictionary in which keys map to options and values."
+always: List of tasks, in a block, that execute regardless of whether there is an error in the block.
+any_errors_fatal: Force any un-handled task errors on any host to propagate to all hosts and end the play.
+async: Run a task asynchronously if the C(action) supports this; value is maximum runtime in seconds.
+become: Boolean that controls if privilege escalation is used or not on :term:`Task` execution. Implemented by the become plugin. See :ref:`become_plugins`.
+become_exe: Path to the executable used to elevate privileges. Implemented by the become plugin. See :ref:`become_plugins`.
+become_flags: A string of flag(s) to pass to the privilege escalation program when :term:`become` is True.
+become_method: Which method of privilege escalation to use (such as sudo or su).
+become_user: "User that you 'become' after using privilege escalation. The remote/login user must have permissions to become this user."
+block: List of tasks in a block.
+changed_when: "Conditional expression that overrides the task's normal 'changed' status."
+check_mode: A boolean that controls if a task is executed in 'check' mode. See :ref:`check_mode_dry`.
+collections: |
+ List of collection namespaces to search for modules, plugins, and roles. See :ref:`collections_using_playbook`
+
+ .. note::
+
+ Tasks within a role do not inherit the value of ``collections`` from the play. To have a role search a list of collections, use the ``collections`` keyword in ``meta/main.yml`` within a role.
+
+
+connection: Allows you to change the connection plugin used for tasks to execute on the target. See :ref:`using_connection`.
+debugger: Enable debugging tasks based on state of the task result. See :ref:`playbook_debugger`.
+delay: Number of seconds to delay between retries. This setting is only used in combination with :term:`until`.
+delegate_facts: Boolean that allows you to apply facts to a delegated host instead of inventory_hostname.
+delegate_to: Host to execute task instead of the target (inventory_hostname). Connection vars from the delegated host will also be used for the task.
+diff: "Toggle to make tasks return 'diff' information or not."
+environment: A dictionary that gets converted into environment vars to be provided for the task upon execution. This can ONLY be used with modules. It isn't supported for any other type of plugin, nor for Ansible itself or its configuration; it just sets the variables for the code responsible for executing the task. This is not a recommended way to pass in confidential data.
+fact_path: Set the fact path option for the fact gathering plugin controlled by :term:`gather_facts`.
+failed_when: "Conditional expression that overrides the task's normal 'failed' status."
+force_handlers: Will force notified handler execution for hosts even if they failed during the play. Will not trigger if the play itself fails.
+gather_facts: "A boolean that controls if the play will automatically run the 'setup' task to gather facts for the hosts."
+gather_subset: Allows you to pass subset options to the fact gathering plugin controlled by :term:`gather_facts`.
+gather_timeout: Allows you to set the timeout for the fact gathering plugin controlled by :term:`gather_facts`.
+handlers: "A section with tasks that are treated as handlers, these won't get executed normally, only when notified after each section of tasks is complete. A handler's `listen` field is not templatable."
+hosts: "A list of groups, hosts or host pattern that translates into a list of hosts that are the play's target."
+ignore_errors: Boolean that allows you to ignore task failures and continue with play. It does not affect connection errors.
+ignore_unreachable: Boolean that allows you to ignore task failures due to an unreachable host and continue with the play. This does not affect other task errors (see :term:`ignore_errors`) but is useful for groups of volatile/ephemeral hosts.
+loop: "Takes a list for the task to iterate over, saving each list element into the ``item`` variable (configurable via loop_control)"
+loop_control: Several keys here allow you to modify/set loop behaviour in a task. See :ref:`loop_control`.
+max_fail_percentage: Can be used to abort the run after a given percentage of hosts in the current batch has failed. This only works on linear or linear-derived strategies.
+module_defaults: Specifies default parameter values for modules.
+name: "Identifier. Can be used for documentation, or in tasks/handlers."
+no_log: Boolean that controls information disclosure.
+notify: "List of handlers to notify when the task returns a 'changed=True' status."
+order: Controls the sorting of hosts as they are used for executing the play. Possible values are inventory (default), sorted, reverse_sorted, reverse_inventory and shuffle.
+poll: Sets the polling interval in seconds for async tasks (default 10s).
+port: Used to override the default port used in a connection.
+post_tasks: A list of tasks to execute after the :term:`tasks` section.
+pre_tasks: A list of tasks to execute before :term:`roles`.
+remote_user: User used to log into the target via the connection plugin.
+register: Name of variable that will contain task status and module return data.
+rescue: List of tasks in a :term:`block` that run if there is a task error in the main :term:`block` list.
+retries: "Number of retries before giving up in a :term:`until` loop. This setting is only used in combination with :term:`until`."
+roles: List of roles to be imported into the play.
+run_once: Boolean that will bypass the host loop, forcing the task to attempt to execute on the first host available and afterwards apply any results and facts to all active hosts in the same batch.
+serial: Explicitly define how Ansible batches the execution of the current play on the play's target. See :ref:`rolling_update_batch_size`.
+strategy: Allows you to choose the strategy plugin to use for the play.
+tags: Tags applied to the task or included tasks, this allows selecting subsets of tasks from the command line.
+tasks: Main list of tasks to execute in the play, they run after :term:`roles` and before :term:`post_tasks`.
+timeout: Time limit for task to execute in, if exceeded Ansible will interrupt and fail the task.
+throttle: Limit number of concurrent task runs on task, block and playbook level. This is independent of the forks and serial settings, but cannot be set higher than those limits. For example, if forks is set to 10 and the throttle is set to 15, at most 10 hosts will be operated on in parallel.
+until: "This keyword implies a ':term:`retries` loop' that will go on until the condition supplied here is met or we hit the :term:`retries` limit."
+vars: Dictionary/map of variables.
+vars_files: List of files that contain vars to include in the play.
+vars_prompt: List of variables to prompt for.
+when: Conditional expression, determines if an iteration of a task is run or not.
diff --git a/lib/ansible/module_utils/__init__.py b/lib/ansible/module_utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/__init__.py
diff --git a/lib/ansible/module_utils/_text.py b/lib/ansible/module_utils/_text.py
new file mode 100644
index 0000000..6cd7721
--- /dev/null
+++ b/lib/ansible/module_utils/_text.py
@@ -0,0 +1,15 @@
+# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+.. warning:: Use ansible.module_utils.common.text.converters instead.
+"""
+
+# Backwards compat for people still calling it from this package
+import codecs
+
+from ansible.module_utils.six import PY3, text_type, binary_type
+
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
diff --git a/lib/ansible/module_utils/ansible_release.py b/lib/ansible/module_utils/ansible_release.py
new file mode 100644
index 0000000..66a04b9
--- /dev/null
+++ b/lib/ansible/module_utils/ansible_release.py
@@ -0,0 +1,24 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+__version__ = '2.14.3'
+__author__ = 'Ansible, Inc.'
+__codename__ = "C'mon Everybody"
diff --git a/lib/ansible/module_utils/api.py b/lib/ansible/module_utils/api.py
new file mode 100644
index 0000000..e780ec6
--- /dev/null
+++ b/lib/ansible/module_utils/api.py
@@ -0,0 +1,166 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright: (c) 2015, Brian Coca, <bcoca@ansible.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+"""
+This module adds shared support for generic api modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+The 'api' module provides the following common argument specs:
+
+ * rate limit spec
+ - rate: number of requests per time unit (int)
+ - rate_limit: time window in which the limit is applied in seconds
+
+ * retry spec
+ - retries: number of attempts
+ - retry_pause: delay between attempts in seconds
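+
+A minimal usage sketch (illustrative, not a prescribed pattern):
+
+    from ansible.module_utils.basic import AnsibleModule
+    from ansible.module_utils.api import rate_limit_argument_spec, retry_argument_spec
+
+    argument_spec = rate_limit_argument_spec(spec=retry_argument_spec())
+    module = AnsibleModule(argument_spec=argument_spec)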
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import functools
+import random
+import sys
+import time
+
+
+def rate_limit_argument_spec(spec=None):
+ """Creates an argument spec for working with rate limiting"""
+ arg_spec = (dict(
+ rate=dict(type='int'),
+ rate_limit=dict(type='int'),
+ ))
+ if spec:
+ arg_spec.update(spec)
+ return arg_spec
+
+
+def retry_argument_spec(spec=None):
+ """Creates an argument spec for working with retrying"""
+ arg_spec = (dict(
+ retries=dict(type='int'),
+ retry_pause=dict(type='float', default=1),
+ ))
+ if spec:
+ arg_spec.update(spec)
+ return arg_spec
+
+
+def basic_auth_argument_spec(spec=None):
+ arg_spec = (dict(
+ api_username=dict(type='str'),
+ api_password=dict(type='str', no_log=True),
+ api_url=dict(type='str'),
+ validate_certs=dict(type='bool', default=True)
+ ))
+ if spec:
+ arg_spec.update(spec)
+ return arg_spec
+
+
+def rate_limit(rate=None, rate_limit=None):
+ """rate limiting decorator"""
+ minrate = None
+ if rate is not None and rate_limit is not None:
+ minrate = float(rate_limit) / float(rate)
+
+ def wrapper(f):
+ last = [0.0]
+
+ def ratelimited(*args, **kwargs):
+ if sys.version_info >= (3, 8):
+ real_time = time.process_time
+ else:
+ real_time = time.clock
+ if minrate is not None:
+ elapsed = real_time() - last[0]
+ left = minrate - elapsed
+ if left > 0:
+ time.sleep(left)
+ last[0] = real_time()
+ ret = f(*args, **kwargs)
+ return ret
+
+ return ratelimited
+ return wrapper
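+
+# Minimal usage sketch (illustrative): allow at most 2 calls per second;
+# extra calls sleep until rate_limit / rate seconds have elapsed.
+#
+#     @rate_limit(rate=2, rate_limit=1)
+#     def poll_endpoint():
+#         ...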
+
+
+def retry(retries=None, retry_pause=1):
+ """Retry decorator"""
+ def wrapper(f):
+
+ def retried(*args, **kwargs):
+ retry_count = 0
+ if retries is not None:
+ ret = None
+ while True:
+ retry_count += 1
+ if retry_count >= retries:
+ raise Exception("Retry limit exceeded: %d" % retries)
+ try:
+ ret = f(*args, **kwargs)
+ except Exception:
+ pass
+ if ret:
+ break
+ time.sleep(retry_pause)
+ return ret
+
+ return retried
+ return wrapper
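+
+# Minimal usage sketch (illustrative): re-call a function whose result may
+# be falsy, sleeping retry_pause seconds between attempts; an
+# Exception("Retry limit exceeded: ...") is raised once the attempt count
+# reaches retries.
+#
+#     @retry(retries=3, retry_pause=2)
+#     def transient_call():
+#         ...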
+
+
+def generate_jittered_backoff(retries=10, delay_base=3, delay_threshold=60):
+ """The "Full Jitter" backoff strategy.
+
+ Ref: https://www.awsarchitectureblog.com/2015/03/backoff.html
+
+ :param retries: The number of delays to generate.
+ :param delay_base: The base time in seconds used to calculate the exponential backoff.
+ :param delay_threshold: The maximum time in seconds for any delay.
+ """
+ for retry in range(0, retries):
+ yield random.randint(0, min(delay_threshold, delay_base * 2 ** retry))
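+
+# Example (illustrative): each yielded delay is a random int in
+# [0, min(delay_threshold, delay_base * 2 ** n)], so
+# list(generate_jittered_backoff(retries=3)) could produce e.g. [2, 5, 11].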
+
+
+def retry_never(exception_or_result):
+ return False
+
+
+def retry_with_delays_and_condition(backoff_iterator, should_retry_error=None):
+ """Generic retry decorator.
+
+ :param backoff_iterator: An iterable of delays in seconds.
+ :param should_retry_error: A callable that takes an exception of the decorated function and decides whether to retry or not (returns a bool).
+ """
+ if should_retry_error is None:
+ should_retry_error = retry_never
+
+ def function_wrapper(function):
+ @functools.wraps(function)
+ def run_function(*args, **kwargs):
+ """This assumes the function has not already been called.
+ If backoff_iterator is empty, we should still run the function a single time with no delay.
+ """
+ call_retryable_function = functools.partial(function, *args, **kwargs)
+
+ for delay in backoff_iterator:
+ try:
+ return call_retryable_function()
+ except Exception as e:
+ if not should_retry_error(e):
+ raise
+ time.sleep(delay)
+
+ # Only or final attempt
+ return call_retryable_function()
+ return run_function
+ return function_wrapper
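+
+# Combined usage sketch (illustrative): jittered delays plus a predicate
+# that only retries IOError.
+#
+#     @retry_with_delays_and_condition(
+#         backoff_iterator=generate_jittered_backoff(retries=5),
+#         should_retry_error=lambda error: isinstance(error, IOError),
+#     )
+#     def fetch():
+#         ...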
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
new file mode 100644
index 0000000..67be924
--- /dev/null
+++ b/lib/ansible/module_utils/basic.py
@@ -0,0 +1,2148 @@
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+FILE_ATTRIBUTES = {
+ 'A': 'noatime',
+ 'a': 'append',
+ 'c': 'compressed',
+ 'C': 'nocow',
+ 'd': 'nodump',
+ 'D': 'dirsync',
+ 'e': 'extents',
+ 'E': 'encrypted',
+ 'h': 'blocksize',
+ 'i': 'immutable',
+ 'I': 'indexed',
+ 'j': 'journalled',
+ 'N': 'inline',
+ 's': 'zero',
+ 'S': 'synchronous',
+ 't': 'notail',
+ 'T': 'blockroot',
+ 'u': 'undelete',
+ 'X': 'compressedraw',
+ 'Z': 'compresseddirty',
+}
+
+# Ansible modules can be written in any language.
+# The functions available here can be used to do many common tasks
+# and simplify development of Python modules.
+
+import __main__
+import atexit
+import errno
+import datetime
+import grp
+import fcntl
+import locale
+import os
+import pwd
+import platform
+import re
+import select
+import shlex
+import shutil
+import signal
+import stat
+import subprocess
+import sys
+import tempfile
+import time
+import traceback
+import types
+
+from itertools import chain, repeat
+
+try:
+ import syslog
+ HAS_SYSLOG = True
+except ImportError:
+ HAS_SYSLOG = False
+
+try:
+ from systemd import journal, daemon as systemd_daemon
+    # Double check that journal has the sendv() method (some packages don't)
+    # and that the system is actually running under systemd
+ has_journal = hasattr(journal, 'sendv') and systemd_daemon.booted()
+except (ImportError, AttributeError):
+ # AttributeError would be caused from use of .booted() if wrong systemd
+ has_journal = False
+
+HAVE_SELINUX = False
+try:
+ from ansible.module_utils.compat import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ pass
+
+# Python2 & 3 way to get NoneType
+NoneType = type(None)
+
+from ansible.module_utils.compat import selectors
+
+from ._text import to_native, to_bytes, to_text
+from ansible.module_utils.common.text.converters import (
+ jsonify,
+ container_to_bytes as json_dict_unicode_to_bytes,
+ container_to_text as json_dict_bytes_to_unicode,
+)
+
+from ansible.module_utils.common.arg_spec import ModuleArgumentSpecValidator
+
+from ansible.module_utils.common.text.formatters import (
+ lenient_lowercase,
+ bytes_to_human,
+ human_to_bytes,
+ SIZE_RANGES,
+)
+
+try:
+ from ansible.module_utils.common._json_compat import json
+except ImportError as e:
+ print('\n{{"msg": "Error: ansible requires the stdlib json: {0}", "failed": true}}'.format(to_native(e)))
+ sys.exit(1)
+
+
+AVAILABLE_HASH_ALGORITHMS = dict()
+try:
+ import hashlib
+
+ # python 2.7.9+ and 2.7.0+
+ for attribute in ('available_algorithms', 'algorithms'):
+ algorithms = getattr(hashlib, attribute, None)
+ if algorithms:
+ break
+ if algorithms is None:
+ # python 2.5+
+ algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
+ for algorithm in algorithms:
+ AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
+
+ # we may have been able to import md5 but it could still not be available
+ try:
+ hashlib.md5()
+ except ValueError:
+ AVAILABLE_HASH_ALGORITHMS.pop('md5', None)
+except Exception:
+ import sha
+ AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
+ try:
+ import md5
+ AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
+ except Exception:
+ pass
+
+from ansible.module_utils.common._collections_compat import (
+ KeysView,
+ Mapping, MutableMapping,
+ Sequence, MutableSequence,
+ Set, MutableSet,
+)
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.file import (
+ _PERM_BITS as PERM_BITS,
+ _EXEC_PERM_BITS as EXEC_PERM_BITS,
+ _DEFAULT_PERM as DEFAULT_PERM,
+ is_executable,
+ format_attributes,
+ get_flags_from_attributes,
+)
+from ansible.module_utils.common.sys_info import (
+ get_distribution,
+ get_distribution_version,
+ get_platform_subclass,
+)
+from ansible.module_utils.pycompat24 import get_exception, literal_eval
+from ansible.module_utils.common.parameters import (
+ env_fallback,
+ remove_values,
+ sanitize_keys,
+ DEFAULT_TYPE_VALIDATORS,
+ PASS_VARS,
+ PASS_BOOLS,
+)
+
+from ansible.module_utils.errors import AnsibleFallbackNotFound, AnsibleValidationErrorMultiple, UnsupportedError
+from ansible.module_utils.six import (
+ PY2,
+ PY3,
+ b,
+ binary_type,
+ integer_types,
+ iteritems,
+ string_types,
+ text_type,
+)
+from ansible.module_utils.six.moves import map, reduce, shlex_quote
+from ansible.module_utils.common.validation import (
+ check_missing_parameters,
+ safe_eval,
+)
+from ansible.module_utils.common._utils import get_all_subclasses as _get_all_subclasses
+from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
+from ansible.module_utils.common.warnings import (
+ deprecate,
+ get_deprecation_messages,
+ get_warning_messages,
+ warn,
+)
+
+# Note: When getting Sequence from collections, it matches with strings. If
+# this matters, make sure to check for strings before checking for sequencetype
+SEQUENCETYPE = frozenset, KeysView, Sequence
+
+PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
+
+imap = map
+
+try:
+ # Python 2
+ unicode # type: ignore[has-type] # pylint: disable=used-before-assignment
+except NameError:
+ # Python 3
+ unicode = text_type
+
+try:
+ # Python 2
+ basestring # type: ignore[has-type] # pylint: disable=used-before-assignment
+except NameError:
+ # Python 3
+ basestring = string_types
+
+_literal_eval = literal_eval
+
+# End of deprecated names
+
+# Internal global holding passed in params. This is consulted in case
+# multiple AnsibleModules are created. Otherwise each AnsibleModule would
+# attempt to read from stdin. Other code should not use this directly as it
+# is an internal implementation detail
+_ANSIBLE_ARGS = None
+
+
+FILE_COMMON_ARGUMENTS = dict(
+    # Common arguments for setting metadata (mode, ownership, permissions in
+    # general) on created files (these are used by set_fs_attributes_if_different
+    # and included in load_file_common_arguments)
+ mode=dict(type='raw'),
+ owner=dict(type='str'),
+ group=dict(type='str'),
+ seuser=dict(type='str'),
+ serole=dict(type='str'),
+ selevel=dict(type='str'),
+ setype=dict(type='str'),
+ attributes=dict(type='str', aliases=['attr']),
+ unsafe_writes=dict(type='bool', default=False, fallback=(env_fallback, ['ANSIBLE_UNSAFE_WRITES'])), # should be available to any module using atomic_move
+)
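+
+# Modules opt in to the arguments above by passing add_file_common_args=True,
+# e.g. (illustrative sketch):
+#
+#     module = AnsibleModule(
+#         argument_spec=dict(path=dict(type='path', required=True)),
+#         add_file_common_args=True,
+#     )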
+
+PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
+
+# Used for parsing symbolic file perms
+MODE_OPERATOR_RE = re.compile(r'[+=-]')
+USERS_RE = re.compile(r'[^ugo]')
+PERMS_RE = re.compile(r'[^rwxXstugo]')
+
+# Used for determining if the system is running a new enough python version
+# and should only restrict on our documented minimum versions
+_PY3_MIN = sys.version_info >= (3, 5)
+_PY2_MIN = (2, 7) <= sys.version_info < (3,)
+_PY_MIN = _PY3_MIN or _PY2_MIN
+if not _PY_MIN:
+ print(
+ '\n{"failed": true, '
+ '"msg": "ansible-core requires a minimum of Python2 version 2.7 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
+ )
+ sys.exit(1)
+
+
+#
+# Deprecated functions
+#
+
+def get_platform():
+ '''
+ **Deprecated** Use :py:func:`platform.system` directly.
+
+ :returns: Name of the platform the module is running on in a native string
+
+ Returns a native string that labels the platform ("Linux", "Solaris", etc). Currently, this is
+ the result of calling :py:func:`platform.system`.
+ '''
+ return platform.system()
+
+# End deprecated functions
+
+
+#
+# Compat shims
+#
+
+def load_platform_subclass(cls, *args, **kwargs):
+ """**Deprecated**: Use ansible.module_utils.common.sys_info.get_platform_subclass instead"""
+ platform_cls = get_platform_subclass(cls)
+ return super(cls, platform_cls).__new__(platform_cls)
+
+
+def get_all_subclasses(cls):
+ """**Deprecated**: Use ansible.module_utils.common._utils.get_all_subclasses instead"""
+ return list(_get_all_subclasses(cls))
+
+
+# End compat shims
+
+
+def heuristic_log_sanitize(data, no_log_values=None):
+ ''' Remove strings that look like passwords from log messages '''
+ # Currently filters:
+ # user:pass@foo/whatever and http://username:pass@wherever/foo
+ # This code has false positives and consumes parts of logs that are
+ # not passwds
+
+ # begin: start of a passwd containing string
+ # end: end of a passwd containing string
+ # sep: char between user and passwd
+ # prev_begin: where in the overall string to start a search for
+ # a passwd
+ # sep_search_end: where in the string to end a search for the sep
+ data = to_native(data)
+
+ output = []
+ begin = len(data)
+ prev_begin = begin
+ sep = 1
+ while sep:
+ # Find the potential end of a passwd
+ try:
+ end = data.rindex('@', 0, begin)
+ except ValueError:
+ # No passwd in the rest of the data
+ output.insert(0, data[0:begin])
+ break
+
+ # Search for the beginning of a passwd
+ sep = None
+ sep_search_end = end
+ while not sep:
+ # URL-style username+password
+ try:
+ begin = data.rindex('://', 0, sep_search_end)
+ except ValueError:
+ # No url style in the data, check for ssh style in the
+ # rest of the string
+ begin = 0
+ # Search for separator
+ try:
+ sep = data.index(':', begin + 3, end)
+ except ValueError:
+ # No separator; choices:
+ if begin == 0:
+ # Searched the whole string so there's no password
+ # here. Return the remaining data
+ output.insert(0, data[0:prev_begin])
+ break
+ # Search for a different beginning of the password field.
+ sep_search_end = begin
+ continue
+ if sep:
+ # Password was found; remove it.
+ output.insert(0, data[end:prev_begin])
+ output.insert(0, '********')
+ output.insert(0, data[begin:sep + 1])
+ prev_begin = begin
+
+ output = ''.join(output)
+ if no_log_values:
+ output = remove_values(output, no_log_values)
+ return output
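+
+# Example (illustrative): the password portion of URL-style credentials is
+# masked --
+#
+#     heuristic_log_sanitize('https://user:s3cret@host/path')
+#     # -> 'https://user:********@host/path'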
+
+
+def _load_params():
+ ''' read the modules parameters and store them globally.
+
+ This function may be needed for certain very dynamic custom modules which
+    want to process the parameters that are being handed to the module. Since
+ this is so closely tied to the implementation of modules we cannot
+ guarantee API stability for it (it may change between versions) however we
+ will try not to break it gratuitously. It is certainly more future-proof
+ to call this function and consume its outputs than to implement the logic
+ inside it as a copy in your own code.
+ '''
+ global _ANSIBLE_ARGS
+ if _ANSIBLE_ARGS is not None:
+ buffer = _ANSIBLE_ARGS
+ else:
+ # debug overrides to read args from file or cmdline
+
+ # Avoid tracebacks when locale is non-utf8
+ # We control the args and we pass them as utf8
+ if len(sys.argv) > 1:
+ if os.path.isfile(sys.argv[1]):
+ fd = open(sys.argv[1], 'rb')
+ buffer = fd.read()
+ fd.close()
+ else:
+ buffer = sys.argv[1]
+ if PY3:
+ buffer = buffer.encode('utf-8', errors='surrogateescape')
+ # default case, read from stdin
+ else:
+ if PY2:
+ buffer = sys.stdin.read()
+ else:
+ buffer = sys.stdin.buffer.read()
+ _ANSIBLE_ARGS = buffer
+
+ try:
+ params = json.loads(buffer.decode('utf-8'))
+ except ValueError:
+ # This helper used too early for fail_json to work.
+ print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
+ sys.exit(1)
+
+ if PY2:
+ params = json_dict_unicode_to_bytes(params)
+
+ try:
+ return params['ANSIBLE_MODULE_ARGS']
+ except KeyError:
+ # This helper does not have access to fail_json so we have to print
+ # json output on our own.
+ print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
+ '"failed": true}')
+ sys.exit(1)
+
+
+def missing_required_lib(library, reason=None, url=None):
+ hostname = platform.node()
+ msg = "Failed to import the required Python library (%s) on %s's Python %s." % (library, hostname, sys.executable)
+ if reason:
+ msg += " This is required %s." % reason
+ if url:
+ msg += " See %s for more info." % url
+
+ msg += (" Please read the module documentation and install it in the appropriate location."
+ " If the required library is installed, but Ansible is using the wrong Python interpreter,"
+ " please consult the documentation on ansible_python_interpreter")
+ return msg
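+
+# Typical usage sketch (illustrative; 'requests' is just an example library):
+#
+#     try:
+#         import requests
+#         HAS_REQUESTS = True
+#     except ImportError:
+#         HAS_REQUESTS = False
+#
+#     if not HAS_REQUESTS:
+#         module.fail_json(msg=missing_required_lib('requests'))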
+
+
+class AnsibleModule(object):
+ def __init__(self, argument_spec, bypass_checks=False, no_log=False,
+ mutually_exclusive=None, required_together=None,
+ required_one_of=None, add_file_common_args=False,
+ supports_check_mode=False, required_if=None, required_by=None):
+
+ '''
+ Common code for quickly building an ansible module in Python
+ (although you can write modules with anything that can return JSON).
+
+ See :ref:`developing_modules_general` for a general introduction
+ and :ref:`developing_program_flow_modules` for more detailed explanation.
+ '''
+
+ self._name = os.path.basename(__file__) # initialize name until we can parse from options
+ self.argument_spec = argument_spec
+ self.supports_check_mode = supports_check_mode
+ self.check_mode = False
+ self.bypass_checks = bypass_checks
+ self.no_log = no_log
+
+ self.mutually_exclusive = mutually_exclusive
+ self.required_together = required_together
+ self.required_one_of = required_one_of
+ self.required_if = required_if
+ self.required_by = required_by
+ self.cleanup_files = []
+ self._debug = False
+ self._diff = False
+ self._socket_path = None
+ self._shell = None
+ self._syslog_facility = 'LOG_USER'
+ self._verbosity = 0
+ # May be used to set modifications to the environment for any
+ # run_command invocation
+ self.run_command_environ_update = {}
+ self._clean = {}
+ self._string_conversion_action = ''
+
+ self.aliases = {}
+ self._legal_inputs = []
+ self._options_context = list()
+ self._tmpdir = None
+
+ if add_file_common_args:
+ for k, v in FILE_COMMON_ARGUMENTS.items():
+ if k not in self.argument_spec:
+ self.argument_spec[k] = v
+
+ # Save parameter values that should never be logged
+ self.no_log_values = set()
+
+ # check the locale as set by the current environment, and reset to
+ # a known valid (LANG=C) if it's an invalid/unavailable locale
+ self._check_locale()
+
+ self._load_params()
+ self._set_internal_properties()
+
+ self.validator = ModuleArgumentSpecValidator(self.argument_spec,
+ self.mutually_exclusive,
+ self.required_together,
+ self.required_one_of,
+ self.required_if,
+ self.required_by,
+ )
+
+ self.validation_result = self.validator.validate(self.params)
+ self.params.update(self.validation_result.validated_parameters)
+ self.no_log_values.update(self.validation_result._no_log_values)
+ self.aliases.update(self.validation_result._aliases)
+
+ try:
+ error = self.validation_result.errors[0]
+ except IndexError:
+ error = None
+
+ # Fail for validation errors, even in check mode
+ if error:
+ msg = self.validation_result.errors.msg
+ if isinstance(error, UnsupportedError):
+ msg = "Unsupported parameters for ({name}) {kind}: {msg}".format(name=self._name, kind='module', msg=msg)
+
+ self.fail_json(msg=msg)
+
+ if self.check_mode and not self.supports_check_mode:
+ self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
+
+ # This is for backwards compatibility only.
+ self._CHECK_ARGUMENT_TYPES_DISPATCHER = DEFAULT_TYPE_VALIDATORS
+
+ if not self.no_log:
+ self._log_invocation()
+
+ # selinux state caching
+ self._selinux_enabled = None
+ self._selinux_mls_enabled = None
+ self._selinux_initial_context = None
+
+ # finally, make sure we're in a sane working dir
+ self._set_cwd()
+
+ @property
+ def tmpdir(self):
+ # if _ansible_tmpdir was not set and we have a remote_tmp,
+ # the module needs to create it and clean it up once finished.
+ # otherwise we create our own module tmp dir from the system defaults
+ if self._tmpdir is None:
+ basedir = None
+
+ if self._remote_tmp is not None:
+ basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp))
+
+ if basedir is not None and not os.path.exists(basedir):
+ try:
+ os.makedirs(basedir, mode=0o700)
+ except (OSError, IOError) as e:
+                    self.warn("Unable to use %s as temporary directory, "
+                              "falling back to system: %s" % (basedir, to_native(e)))
+ basedir = None
+ else:
+ self.warn("Module remote_tmp %s did not exist and was "
+                              "created with a mode of 0700; this may cause"
+ " issues when running as another user. To "
+ "avoid this, create the remote_tmp dir with "
+ "the correct permissions manually" % basedir)
+
+ basefile = "ansible-moduletmp-%s-" % time.time()
+ try:
+ tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
+ except (OSError, IOError) as e:
+ self.fail_json(
+ msg="Failed to create remote module tmp path at dir %s "
+ "with prefix %s: %s" % (basedir, basefile, to_native(e))
+ )
+ if not self._keep_remote_files:
+ atexit.register(shutil.rmtree, tmpdir)
+ self._tmpdir = tmpdir
+
+ return self._tmpdir
+
+ def warn(self, warning):
+ warn(warning)
+ self.log('[WARNING] %s' % warning)
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ if version is not None and date is not None:
+ raise AssertionError("implementation error -- version and date must not both be set")
+ deprecate(msg, version=version, date=date, collection_name=collection_name)
+        # For compatibility, we accept that neither version nor date is set,
+        # and treat that the same as if version had been set
+ if date is not None:
+ self.log('[DEPRECATION WARNING] %s %s' % (msg, date))
+ else:
+ self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
+
+ def load_file_common_arguments(self, params, path=None):
+ '''
+        Many modules deal with files; this encapsulates the common
+        options that the file module accepts so that they are directly
+        available to all modules and they can share code.
+
+        Allows overriding the path/dest module argument by providing path.
+ '''
+
+ if path is None:
+ path = params.get('path', params.get('dest', None))
+ if path is None:
+ return {}
+ else:
+ path = os.path.expanduser(os.path.expandvars(path))
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ # if the path is a symlink, and we're following links, get
+ # the target of the link instead for testing
+ if params.get('follow', False) and os.path.islink(b_path):
+ b_path = os.path.realpath(b_path)
+ path = to_native(b_path)
+
+ mode = params.get('mode', None)
+ owner = params.get('owner', None)
+ group = params.get('group', None)
+
+ # selinux related options
+ seuser = params.get('seuser', None)
+ serole = params.get('serole', None)
+ setype = params.get('setype', None)
+ selevel = params.get('selevel', None)
+ secontext = [seuser, serole, setype]
+
+ if self.selinux_mls_enabled():
+ secontext.append(selevel)
+
+ default_secontext = self.selinux_default_context(path)
+ for i in range(len(default_secontext)):
+            if secontext[i] == '_default':
+ secontext[i] = default_secontext[i]
+
+ attributes = params.get('attributes', None)
+ return dict(
+ path=path, mode=mode, owner=owner, group=group,
+ seuser=seuser, serole=serole, setype=setype,
+ selevel=selevel, secontext=secontext, attributes=attributes,
+ )
+
+ # Detect whether using selinux that is MLS-aware.
+ # While this means you can set the level/range with
+ # selinux.lsetfilecon(), it may or may not mean that you
+ # will get the selevel as part of the context returned
+ # by selinux.lgetfilecon().
+
+ def selinux_mls_enabled(self):
+ if self._selinux_mls_enabled is None:
+ self._selinux_mls_enabled = HAVE_SELINUX and selinux.is_selinux_mls_enabled() == 1
+
+ return self._selinux_mls_enabled
+
+ def selinux_enabled(self):
+ if self._selinux_enabled is None:
+ self._selinux_enabled = HAVE_SELINUX and selinux.is_selinux_enabled() == 1
+
+ return self._selinux_enabled
+
+ # Determine whether we need a placeholder for selevel/mls
+ def selinux_initial_context(self):
+ if self._selinux_initial_context is None:
+ self._selinux_initial_context = [None, None, None]
+ if self.selinux_mls_enabled():
+ self._selinux_initial_context.append(None)
+
+ return self._selinux_initial_context
+
+ # If selinux fails to find a default, return an array of None
+ def selinux_default_context(self, path, mode=0):
+ context = self.selinux_initial_context()
+ if not self.selinux_enabled():
+ return context
+ try:
+ ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
+ except OSError:
+ return context
+ if ret[0] == -1:
+ return context
+ # Limit split to 4 because the selevel, the last in the list,
+ # may contain ':' characters
+ context = ret[1].split(':', 3)
+ return context
+
+ def selinux_context(self, path):
+ context = self.selinux_initial_context()
+ if not self.selinux_enabled():
+ return context
+ try:
+ ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ self.fail_json(path=path, msg='path %s does not exist' % path)
+ else:
+ self.fail_json(path=path, msg='failed to retrieve selinux context')
+ if ret[0] == -1:
+ return context
+ # Limit split to 4 because the selevel, the last in the list,
+ # may contain ':' characters
+ context = ret[1].split(':', 3)
+ return context
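+
+    # Example (illustrative): on an SELinux-enabled host, selinux_context()
+    # returns the context fields split out, e.g.
+    # ['system_u', 'object_r', 'etc_t', 's0'] for /etc/hosts.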
+
+ def user_and_group(self, path, expand=True):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if expand:
+ b_path = os.path.expanduser(os.path.expandvars(b_path))
+ st = os.lstat(b_path)
+ uid = st.st_uid
+ gid = st.st_gid
+ return (uid, gid)
+
+ def find_mount_point(self, path):
+ '''
+        Takes a path and returns its mount point
+
+ :param path: a string type with a filesystem path
+ :returns: the path to the mount point as a text type
+ '''
+
+ b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
+ while not os.path.ismount(b_path):
+ b_path = os.path.dirname(b_path)
+
+ return to_text(b_path, errors='surrogate_or_strict')
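+
+    # Example (illustrative): find_mount_point('/home/user/notes.txt')
+    # returns '/home' when /home is a separate mount, otherwise '/'.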
+
+ def is_special_selinux_path(self, path):
+ """
+ Returns a tuple containing (True, selinux_context) if the given path is on a
+ NFS or other 'special' fs mount point, otherwise the return will be (False, None).
+ """
+ try:
+ f = open('/proc/mounts', 'r')
+ mount_data = f.readlines()
+ f.close()
+ except Exception:
+ return (False, None)
+
+ path_mount_point = self.find_mount_point(path)
+
+ for line in mount_data:
+ (device, mount_point, fstype, options, rest) = line.split(' ', 4)
+ if to_bytes(path_mount_point) == to_bytes(mount_point):
+ for fs in self._selinux_special_fs:
+ if fs in fstype:
+ special_context = self.selinux_context(path_mount_point)
+ return (True, special_context)
+
+ return (False, None)
+
+ def set_default_selinux_context(self, path, changed):
+ if not self.selinux_enabled():
+ return changed
+ context = self.selinux_default_context(path)
+ return self.set_context_if_different(path, context, False)
+
+ def set_context_if_different(self, path, context, changed, diff=None):
+
+ if not self.selinux_enabled():
+ return changed
+
+ if self.check_file_absent_if_check_mode(path):
+ return True
+
+ cur_context = self.selinux_context(path)
+ new_context = list(cur_context)
+ # Iterate over the current context instead of the
+ # argument context, which may have selevel.
+
+ (is_special_se, sp_context) = self.is_special_selinux_path(path)
+ if is_special_se:
+ new_context = sp_context
+ else:
+ for i in range(len(cur_context)):
+ if len(context) > i:
+ if context[i] is not None and context[i] != cur_context[i]:
+ new_context[i] = context[i]
+ elif context[i] is None:
+ new_context[i] = cur_context[i]
+
+ if cur_context != new_context:
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['secontext'] = cur_context
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['secontext'] = new_context
+
+ try:
+ if self.check_mode:
+ return True
+ rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
+ except OSError as e:
+ self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
+ new_context=new_context, cur_context=cur_context, input_was=context)
+ if rc != 0:
+ self.fail_json(path=path, msg='set selinux context failed')
+ changed = True
+ return changed
+
+ def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
+
+ if owner is None:
+ return changed
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if expand:
+ b_path = os.path.expanduser(os.path.expandvars(b_path))
+
+ if self.check_file_absent_if_check_mode(b_path):
+ return True
+
+ orig_uid, orig_gid = self.user_and_group(b_path, expand)
+ try:
+ uid = int(owner)
+ except ValueError:
+ try:
+ uid = pwd.getpwnam(owner).pw_uid
+ except KeyError:
+ path = to_text(b_path)
+ self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
+
+ if orig_uid != uid:
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['owner'] = orig_uid
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['owner'] = uid
+
+ if self.check_mode:
+ return True
+ try:
+ os.lchown(b_path, uid, -1)
+ except (IOError, OSError) as e:
+ path = to_text(b_path)
+ self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
+ changed = True
+ return changed
+
+ def set_group_if_different(self, path, group, changed, diff=None, expand=True):
+
+ if group is None:
+ return changed
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if expand:
+ b_path = os.path.expanduser(os.path.expandvars(b_path))
+
+ if self.check_file_absent_if_check_mode(b_path):
+ return True
+
+ orig_uid, orig_gid = self.user_and_group(b_path, expand)
+ try:
+ gid = int(group)
+ except ValueError:
+ try:
+ gid = grp.getgrnam(group).gr_gid
+ except KeyError:
+ path = to_text(b_path)
+ self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
+
+ if orig_gid != gid:
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['group'] = orig_gid
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['group'] = gid
+
+ if self.check_mode:
+ return True
+ try:
+ os.lchown(b_path, -1, gid)
+ except OSError:
+ path = to_text(b_path)
+ self.fail_json(path=path, msg='chgrp failed')
+ changed = True
+ return changed
+
+ def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
+
+ if mode is None:
+ return changed
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if expand:
+ b_path = os.path.expanduser(os.path.expandvars(b_path))
+
+ if self.check_file_absent_if_check_mode(b_path):
+ return True
+
+ path_stat = os.lstat(b_path)
+
+ if not isinstance(mode, int):
+ try:
+ mode = int(mode, 8)
+ except Exception:
+ try:
+ mode = self._symbolic_mode_to_octal(path_stat, mode)
+ except Exception as e:
+ path = to_text(b_path)
+ self.fail_json(path=path,
+ msg="mode must be in octal or symbolic form",
+ details=to_native(e))
+
+ if mode != stat.S_IMODE(mode):
+            # prevent mode from having extra info or being an invalid long number
+ path = to_text(b_path)
+ self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
+
+ prev_mode = stat.S_IMODE(path_stat.st_mode)
+
+ if prev_mode != mode:
+
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['mode'] = '0%03o' % prev_mode
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['mode'] = '0%03o' % mode
+
+ if self.check_mode:
+ return True
+ # FIXME: comparison against string above will cause this to be executed
+ # every time
+ try:
+ if hasattr(os, 'lchmod'):
+ os.lchmod(b_path, mode)
+ else:
+ if not os.path.islink(b_path):
+ os.chmod(b_path, mode)
+ else:
+ # Attempt to set the perms of the symlink but be
+ # careful not to change the perms of the underlying
+ # file while trying
+ underlying_stat = os.stat(b_path)
+ os.chmod(b_path, mode)
+ new_underlying_stat = os.stat(b_path)
+ if underlying_stat.st_mode != new_underlying_stat.st_mode:
+ os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
+ except OSError as e:
+ if os.path.islink(b_path) and e.errno in (
+ errno.EACCES, # can't access symlink in sticky directory (stat)
+ errno.EPERM, # can't set mode on symbolic links (chmod)
+ errno.EROFS, # can't set mode on read-only filesystem
+ ):
+ pass
+ elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
+ pass
+ else:
+ raise
+ except Exception as e:
+ path = to_text(b_path)
+ self.fail_json(path=path, msg='chmod failed', details=to_native(e),
+ exception=traceback.format_exc())
+
+ path_stat = os.lstat(b_path)
+ new_mode = stat.S_IMODE(path_stat.st_mode)
+
+ if new_mode != prev_mode:
+ changed = True
+ return changed
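+
+    # Usage sketch (illustrative): octal ints, octal strings and symbolic
+    # modes are all accepted --
+    #
+    #     changed = module.set_mode_if_different('/tmp/f', 0o644, False)
+    #     changed = module.set_mode_if_different('/tmp/f', 'u=rw,go=r', changed)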
+
+ def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
+
+ if attributes is None:
+ return changed
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if expand:
+ b_path = os.path.expanduser(os.path.expandvars(b_path))
+
+ if self.check_file_absent_if_check_mode(b_path):
+ return True
+
+ existing = self.get_file_attributes(b_path, include_version=False)
+
+ attr_mod = '='
+ if attributes.startswith(('-', '+')):
+ attr_mod = attributes[0]
+ attributes = attributes[1:]
+
+ if existing.get('attr_flags', '') != attributes or attr_mod == '-':
+ attrcmd = self.get_bin_path('chattr')
+ if attrcmd:
+ attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path]
+ changed = True
+
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['attributes'] = existing.get('attr_flags')
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['attributes'] = '%s%s' % (attr_mod, attributes)
+
+ if not self.check_mode:
+ try:
+ rc, out, err = self.run_command(attrcmd)
+ if rc != 0 or err:
+ raise Exception("Error while setting attributes: %s" % (out + err))
+ except Exception as e:
+ self.fail_json(path=to_text(b_path), msg='chattr failed',
+ details=to_native(e), exception=traceback.format_exc())
+ return changed
+
+ def get_file_attributes(self, path, include_version=True):
+ output = {}
+ attrcmd = self.get_bin_path('lsattr', False)
+ if attrcmd:
+ flags = '-vd' if include_version else '-d'
+ attrcmd = [attrcmd, flags, path]
+ try:
+ rc, out, err = self.run_command(attrcmd)
+ if rc == 0:
+ res = out.split()
+ attr_flags_idx = 0
+ if include_version:
+ attr_flags_idx = 1
+ output['version'] = res[0].strip()
+ output['attr_flags'] = res[attr_flags_idx].replace('-', '').strip()
+ output['attributes'] = format_attributes(output['attr_flags'])
+ except Exception:
+ pass
+ return output
+
+ @classmethod
+ def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
+ """
+ This enables symbolic chmod string parsing as stated in the chmod man-page
+
+ This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
+ """
+
+ new_mode = stat.S_IMODE(path_stat.st_mode)
+
+ # Now parse all symbolic modes
+ for mode in symbolic_mode.split(','):
+ # Per single mode. This always contains a '+', '-' or '='
+ # Split it on that
+ permlist = MODE_OPERATOR_RE.split(mode)
+
+ # And find all the operators
+ opers = MODE_OPERATOR_RE.findall(mode)
+
+ # The user(s) where it's all about is the first element in the
+ # 'permlist' list. Take that and remove it from the list.
+ # An empty user or 'a' means 'all'.
+ users = permlist.pop(0)
+ use_umask = (users == '')
+ if users == 'a' or users == '':
+ users = 'ugo'
+
+ # Check if there are illegal characters in the user list
+ # They can end up in 'users' because they are not split
+ if USERS_RE.match(users):
+ raise ValueError("bad symbolic permission for mode: %s" % mode)
+
+ # Now we have two list of equal length, one contains the requested
+ # permissions and one with the corresponding operators.
+ for idx, perms in enumerate(permlist):
+ # Check if there are illegal characters in the permissions
+ if PERMS_RE.match(perms):
+ raise ValueError("bad symbolic permission for mode: %s" % mode)
+
+ for user in users:
+ mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
+ new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
+
+ return new_mode
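+
+    # Example (illustrative): for a file currently at mode 0o644,
+    # _symbolic_mode_to_octal(path_stat, 'u+x,go-r') returns 0o700.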
+
+ @staticmethod
+ def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
+ if operator == '=':
+ if user == 'u':
+ mask = stat.S_IRWXU | stat.S_ISUID
+ elif user == 'g':
+ mask = stat.S_IRWXG | stat.S_ISGID
+ elif user == 'o':
+ mask = stat.S_IRWXO | stat.S_ISVTX
+
+ # mask out u, g, or o permissions from current_mode and apply new permissions
+ inverse_mask = mask ^ PERM_BITS
+ new_mode = (current_mode & inverse_mask) | mode_to_apply
+ elif operator == '+':
+ new_mode = current_mode | mode_to_apply
+ elif operator == '-':
+ new_mode = current_mode - (current_mode & mode_to_apply)
+ return new_mode
+
+ @staticmethod
+ def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
+ prev_mode = stat.S_IMODE(path_stat.st_mode)
+
+ is_directory = stat.S_ISDIR(path_stat.st_mode)
+ has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
+ apply_X_permission = is_directory or has_x_permissions
+
+ # Get the umask, if the 'user' part is empty, the effect is as if (a) were
+ # given, but bits that are set in the umask are not affected.
+ # We also need the "reversed umask" for masking
+ umask = os.umask(0)
+ os.umask(umask)
+ rev_umask = umask ^ PERM_BITS
+
+ # Permission bits constants documented at:
+ # https://docs.python.org/3/library/stat.html#stat.S_ISUID
+ if apply_X_permission:
+ X_perms = {
+ 'u': {'X': stat.S_IXUSR},
+ 'g': {'X': stat.S_IXGRP},
+ 'o': {'X': stat.S_IXOTH},
+ }
+ else:
+ X_perms = {
+ 'u': {'X': 0},
+ 'g': {'X': 0},
+ 'o': {'X': 0},
+ }
+
+ user_perms_to_modes = {
+ 'u': {
+ 'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
+ 'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
+ 'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
+ 's': stat.S_ISUID,
+ 't': 0,
+ 'u': prev_mode & stat.S_IRWXU,
+ 'g': (prev_mode & stat.S_IRWXG) << 3,
+ 'o': (prev_mode & stat.S_IRWXO) << 6},
+ 'g': {
+ 'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
+ 'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
+ 'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
+ 's': stat.S_ISGID,
+ 't': 0,
+ 'u': (prev_mode & stat.S_IRWXU) >> 3,
+ 'g': prev_mode & stat.S_IRWXG,
+ 'o': (prev_mode & stat.S_IRWXO) << 3},
+ 'o': {
+ 'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
+ 'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
+ 'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
+ 's': 0,
+ 't': stat.S_ISVTX,
+ 'u': (prev_mode & stat.S_IRWXU) >> 6,
+ 'g': (prev_mode & stat.S_IRWXG) >> 3,
+ 'o': prev_mode & stat.S_IRWXO},
+ }
+
+ # Insert X_perms into user_perms_to_modes
+ for key, value in X_perms.items():
+ user_perms_to_modes[key].update(value)
+
+ def or_reduce(mode, perm):
+ return mode | user_perms_to_modes[user][perm]
+
+ return reduce(or_reduce, perms, 0)
+
+ def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
+ # set modes owners and context as needed
+ changed = self.set_context_if_different(
+ file_args['path'], file_args['secontext'], changed, diff
+ )
+ changed = self.set_owner_if_different(
+ file_args['path'], file_args['owner'], changed, diff, expand
+ )
+ changed = self.set_group_if_different(
+ file_args['path'], file_args['group'], changed, diff, expand
+ )
+ changed = self.set_mode_if_different(
+ file_args['path'], file_args['mode'], changed, diff, expand
+ )
+ changed = self.set_attributes_if_different(
+ file_args['path'], file_args['attributes'], changed, diff, expand
+ )
+ return changed
+
+ def check_file_absent_if_check_mode(self, file_path):
+ return self.check_mode and not os.path.exists(file_path)
+
+ def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
+ return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
+
+ def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
+ return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
+
+ def add_path_info(self, kwargs):
+ '''
+ for results that are files, supplement the info about the file
+ in the return path with stats about the file path.
+ '''
+
+ path = kwargs.get('path', kwargs.get('dest', None))
+ if path is None:
+ return kwargs
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if os.path.exists(b_path):
+ (uid, gid) = self.user_and_group(path)
+ kwargs['uid'] = uid
+ kwargs['gid'] = gid
+ try:
+ user = pwd.getpwuid(uid)[0]
+ except KeyError:
+ user = str(uid)
+ try:
+ group = grp.getgrgid(gid)[0]
+ except KeyError:
+ group = str(gid)
+ kwargs['owner'] = user
+ kwargs['group'] = group
+ st = os.lstat(b_path)
+ kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
+ # secontext not yet supported
+ if os.path.islink(b_path):
+ kwargs['state'] = 'link'
+ elif os.path.isdir(b_path):
+ kwargs['state'] = 'directory'
+ elif os.stat(b_path).st_nlink > 1:
+ kwargs['state'] = 'hard'
+ else:
+ kwargs['state'] = 'file'
+ if self.selinux_enabled():
+ kwargs['secontext'] = ':'.join(self.selinux_context(path))
+ kwargs['size'] = st[stat.ST_SIZE]
+ return kwargs
+
+ def _check_locale(self):
+ '''
+ Uses the locale module to test the currently set locale
+ (per the LANG and LC_CTYPE environment settings)
+ '''
+ try:
+ # setting the locale to '' uses the default locale
+ # as it would be returned by locale.getdefaultlocale()
+ locale.setlocale(locale.LC_ALL, '')
+ except locale.Error:
+ # fallback to the 'best' locale, per the function
+ # final fallback is 'C', which may cause unicode issues
+ # but is preferable to simply failing on unknown locale
+ best_locale = get_best_parsable_locale(self)
+
+ # need to set several since many tools choose to ignore documented precedence and scope
+ locale.setlocale(locale.LC_ALL, best_locale)
+ os.environ['LANG'] = best_locale
+ os.environ['LC_ALL'] = best_locale
+ os.environ['LC_MESSAGES'] = best_locale
+ except Exception as e:
+ self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def _set_internal_properties(self, argument_spec=None, module_parameters=None):
+ if argument_spec is None:
+ argument_spec = self.argument_spec
+ if module_parameters is None:
+ module_parameters = self.params
+
+ for k in PASS_VARS:
+ # handle setting internal properties from internal ansible vars
+ param_key = '_ansible_%s' % k
+ if param_key in module_parameters:
+ if k in PASS_BOOLS:
+ setattr(self, PASS_VARS[k][0], self.boolean(module_parameters[param_key]))
+ else:
+ setattr(self, PASS_VARS[k][0], module_parameters[param_key])
+
+ # clean up internal top level params:
+ if param_key in self.params:
+ del self.params[param_key]
+ else:
+ # use defaults if not already set
+ if not hasattr(self, PASS_VARS[k][0]):
+ setattr(self, PASS_VARS[k][0], PASS_VARS[k][1])
+
+ def safe_eval(self, value, locals=None, include_exceptions=False):
+ return safe_eval(value, locals, include_exceptions)
+
+ def _load_params(self):
+ ''' read the input and set the params attribute.
+
+ This method is for backwards compatibility. The guts of the function
+ were moved out in 2.1 so that custom modules could read the parameters.
+ '''
+ # debug overrides to read args from file or cmdline
+ self.params = _load_params()
+
+ def _log_to_syslog(self, msg):
+ if HAS_SYSLOG:
+ try:
+ module = 'ansible-%s' % self._name
+ facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
+ syslog.openlog(str(module), 0, facility)
+ syslog.syslog(syslog.LOG_INFO, msg)
+ except TypeError as e:
+ self.fail_json(
+ msg='Failed to log to syslog (%s). To proceed anyway, '
+ 'disable syslog logging by setting no_target_syslog '
+ 'to True in your Ansible config.' % to_native(e),
+ exception=traceback.format_exc(),
+ msg_to_log=msg,
+ )
+
+ def debug(self, msg):
+ if self._debug:
+ self.log('[debug] %s' % msg)
+
+ def log(self, msg, log_args=None):
+
+ if not self.no_log:
+
+ if log_args is None:
+ log_args = dict()
+
+ module = 'ansible-%s' % self._name
+ if isinstance(module, binary_type):
+ module = module.decode('utf-8', 'replace')
+
+ # 6655 - allow for accented characters
+ if not isinstance(msg, (binary_type, text_type)):
+ raise TypeError("msg should be a string (got %s)" % type(msg))
+
+ # We want journal to always take text type
+ # syslog takes bytes on py2, text type on py3
+ if isinstance(msg, binary_type):
+ journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
+ else:
+ # TODO: surrogateescape is a danger here on Py3
+ journal_msg = remove_values(msg, self.no_log_values)
+
+ if PY3:
+ syslog_msg = journal_msg
+ else:
+ syslog_msg = journal_msg.encode('utf-8', 'replace')
+
+ if has_journal:
+ journal_args = [("MODULE", os.path.basename(__file__))]
+ for arg in log_args:
+ name, value = (arg.upper(), str(log_args[arg]))
+ if name in (
+ 'PRIORITY', 'MESSAGE', 'MESSAGE_ID',
+ 'CODE_FILE', 'CODE_LINE', 'CODE_FUNC',
+ 'SYSLOG_FACILITY', 'SYSLOG_IDENTIFIER',
+ 'SYSLOG_PID',
+ ):
+ name = "_%s" % name
+ journal_args.append((name, value))
+
+ try:
+ if HAS_SYSLOG:
+ # If syslog_facility specified, it needs to convert
+ # from the facility name to the facility code, and
+ # set it as SYSLOG_FACILITY argument of journal.send()
+ facility = getattr(syslog,
+ self._syslog_facility,
+ syslog.LOG_USER) >> 3
+ journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
+ SYSLOG_FACILITY=facility,
+ **dict(journal_args))
+ else:
+ journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
+ **dict(journal_args))
+ except IOError:
+ # fall back to syslog since logging to journal failed
+ self._log_to_syslog(syslog_msg)
+ else:
+ self._log_to_syslog(syslog_msg)
+
+ def _log_invocation(self):
+ ''' log that ansible ran the module '''
+ # TODO: generalize a separate log function and make log_invocation use it
+ # Sanitize possible password argument when logging.
+ log_args = dict()
+
+ for param in self.params:
+ canon = self.aliases.get(param, param)
+ arg_opts = self.argument_spec.get(canon, {})
+ no_log = arg_opts.get('no_log', None)
+
+ # try to proactively capture password/passphrase fields
+ if no_log is None and PASSWORD_MATCH.search(param):
+ log_args[param] = 'NOT_LOGGING_PASSWORD'
+ self.warn('Module did not set no_log for %s' % param)
+ elif self.boolean(no_log):
+ log_args[param] = 'NOT_LOGGING_PARAMETER'
+ else:
+ param_val = self.params[param]
+ if not isinstance(param_val, (text_type, binary_type)):
+ param_val = str(param_val)
+ elif isinstance(param_val, text_type):
+ param_val = param_val.encode('utf-8')
+ log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
+
+ msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
+ if msg:
+ msg = 'Invoked with %s' % ' '.join(msg)
+ else:
+ msg = 'Invoked'
+
+ self.log(msg, log_args=log_args)
+
+ def _set_cwd(self):
+ try:
+ cwd = os.getcwd()
+ if not os.access(cwd, os.F_OK | os.R_OK):
+ raise Exception()
+ return cwd
+ except Exception:
+ # we don't have access to the cwd, probably because of sudo.
+ # Try and move to a neutral location to prevent errors
+ for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
+ try:
+ if os.access(cwd, os.F_OK | os.R_OK):
+ os.chdir(cwd)
+ return cwd
+ except Exception:
+ pass
+ # we won't error here, as it may *not* be a problem,
+ # and we don't want to break modules unnecessarily
+ return None
+
+ def get_bin_path(self, arg, required=False, opt_dirs=None):
+ '''
+ Find system executable in PATH.
+
+ :param arg: The executable to find.
+ :param required: if executable is not found and required is ``True``, fail_json
+ :param opt_dirs: optional list of directories to search in addition to ``PATH``
+ :returns: if found return full path; otherwise return None
+ '''
+
+ bin_path = None
+ try:
+ bin_path = get_bin_path(arg=arg, opt_dirs=opt_dirs)
+ except ValueError as e:
+ if required:
+ self.fail_json(msg=to_text(e))
+ else:
+ return bin_path
+
+ return bin_path
+
+ def boolean(self, arg):
+ '''Convert the argument to a boolean'''
+ if arg is None:
+ return arg
+
+ try:
+ return boolean(arg)
+ except TypeError as e:
+ self.fail_json(msg=to_native(e))
+
+ def jsonify(self, data):
+ try:
+ return jsonify(data)
+ except UnicodeError as e:
+ self.fail_json(msg=to_text(e))
+
+ def from_json(self, data):
+ return json.loads(data)
+
+ def add_cleanup_file(self, path):
+ if path not in self.cleanup_files:
+ self.cleanup_files.append(path)
+
+ def do_cleanup_files(self):
+ for path in self.cleanup_files:
+ self.cleanup(path)
+
+ def _return_formatted(self, kwargs):
+
+ self.add_path_info(kwargs)
+
+ if 'invocation' not in kwargs:
+ kwargs['invocation'] = {'module_args': self.params}
+
+ if 'warnings' in kwargs:
+ if isinstance(kwargs['warnings'], list):
+ for w in kwargs['warnings']:
+ self.warn(w)
+ else:
+ self.warn(kwargs['warnings'])
+
+ warnings = get_warning_messages()
+ if warnings:
+ kwargs['warnings'] = warnings
+
+ if 'deprecations' in kwargs:
+ if isinstance(kwargs['deprecations'], list):
+ for d in kwargs['deprecations']:
+ if isinstance(d, SEQUENCETYPE) and len(d) == 2:
+ self.deprecate(d[0], version=d[1])
+ elif isinstance(d, Mapping):
+ self.deprecate(d['msg'], version=d.get('version'), date=d.get('date'),
+ collection_name=d.get('collection_name'))
+ else:
+ self.deprecate(d) # pylint: disable=ansible-deprecated-no-version
+ else:
+ self.deprecate(kwargs['deprecations']) # pylint: disable=ansible-deprecated-no-version
+
+ deprecations = get_deprecation_messages()
+ if deprecations:
+ kwargs['deprecations'] = deprecations
+
+ kwargs = remove_values(kwargs, self.no_log_values)
+ print('\n%s' % self.jsonify(kwargs))
+
+ def exit_json(self, **kwargs):
+ ''' return from the module, without error '''
+
+ self.do_cleanup_files()
+ self._return_formatted(kwargs)
+ sys.exit(0)
+
+ def fail_json(self, msg, **kwargs):
+ ''' return from the module, with an error message '''
+
+ kwargs['failed'] = True
+ kwargs['msg'] = msg
+
+ # Add traceback if debug or high verbosity and it is missing
+ # NOTE: Badly named as exception, it really always has been a traceback
+ if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
+ if PY2:
+ # On Python 2 this is the last (stack frame) exception and as such may be unrelated to the failure
+ kwargs['exception'] = 'WARNING: The below traceback may *not* be related to the actual failure.\n' +\
+ ''.join(traceback.format_tb(sys.exc_info()[2]))
+ else:
+ kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
+
+ self.do_cleanup_files()
+ self._return_formatted(kwargs)
+ sys.exit(1)
+
+ def fail_on_missing_params(self, required_params=None):
+ if not required_params:
+ return
+ try:
+ check_missing_parameters(self.params, required_params)
+ except TypeError as e:
+ self.fail_json(msg=to_native(e))
+
+ def digest_from_file(self, filename, algorithm):
+ ''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
+ b_filename = to_bytes(filename, errors='surrogate_or_strict')
+
+ if not os.path.exists(b_filename):
+ return None
+ if os.path.isdir(b_filename):
+ self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
+
+ # preserve old behaviour where the third parameter was a hash algorithm object
+ if hasattr(algorithm, 'hexdigest'):
+ digest_method = algorithm
+ else:
+ try:
+ digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
+ except KeyError:
+ self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
+ (filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
+
+ blocksize = 64 * 1024
+ infile = open(os.path.realpath(b_filename), 'rb')
+ block = infile.read(blocksize)
+ while block:
+ digest_method.update(block)
+ block = infile.read(blocksize)
+ infile.close()
+ return digest_method.hexdigest()
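+
+    # Usage sketch (illustrative): the algorithm may be given by name or as
+    # an already-constructed hash object --
+    #
+    #     module.digest_from_file('/etc/hosts', 'sha256')
+    #     module.digest_from_file('/etc/hosts', hashlib.sha256())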
+
+ def md5(self, filename):
+ ''' Return MD5 hex digest of local file using digest_from_file().
+
+ Do not use this function unless you have no other choice for:
+ 1) Optional backwards compatibility
+ 2) Compatibility with a third party protocol
+
+ This function will not work on systems complying with FIPS-140-2.
+
+ Most uses of this function can use the module.sha1 function instead.
+ '''
+ if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
+ raise ValueError('MD5 not available. Possibly running in FIPS mode')
+ return self.digest_from_file(filename, 'md5')
+
+ def sha1(self, filename):
+ ''' Return SHA1 hex digest of local file using digest_from_file(). '''
+ return self.digest_from_file(filename, 'sha1')
+
+ def sha256(self, filename):
+ ''' Return SHA-256 hex digest of local file using digest_from_file(). '''
+ return self.digest_from_file(filename, 'sha256')
+
+ def backup_local(self, fn):
+        '''make a date-marked backup of the specified file; return the backup
+        destination path, or '' if the file does not exist'''
+
+ backupdest = ''
+ if os.path.exists(fn):
+ # backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
+ ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
+ backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
+
+ try:
+ self.preserved_copy(fn, backupdest)
+ except (shutil.Error, IOError) as e:
+ self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
+
+ return backupdest
+
+ def cleanup(self, tmpfile):
+ if os.path.exists(tmpfile):
+ try:
+ os.unlink(tmpfile)
+ except OSError as e:
+ sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
+
+ def preserved_copy(self, src, dest):
+ """Copy a file with preserved ownership, permissions and context"""
+
+ # shutil.copy2(src, dst)
+ # Similar to shutil.copy(), but metadata is copied as well - in fact,
+ # this is just shutil.copy() followed by copystat(). This is similar
+ # to the Unix command cp -p.
+ #
+ # shutil.copystat(src, dst)
+ # Copy the permission bits, last access time, last modification time,
+ # and flags from src to dst. The file contents, owner, and group are
+ # unaffected. src and dst are path names given as strings.
+
+ shutil.copy2(src, dest)
+
+ # Set the context
+ if self.selinux_enabled():
+ context = self.selinux_context(src)
+ self.set_context_if_different(dest, context, False)
+
+ # chown it
+ try:
+ dest_stat = os.stat(src)
+ tmp_stat = os.stat(dest)
+ if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
+ os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
+ except OSError as e:
+ if e.errno != errno.EPERM:
+ raise
+
+ # Set the attributes
+ current_attribs = self.get_file_attributes(src, include_version=False)
+ current_attribs = current_attribs.get('attr_flags', '')
+ self.set_attributes_if_different(dest, current_attribs, True)
+
+ def atomic_move(self, src, dest, unsafe_writes=False):
+ '''atomically move src to dest, copying attributes from dest, returns true on success
+ it uses os.rename to ensure this as it is an atomic operation, rest of the function is
+ to work around limitations, corner cases and ensure selinux context is saved if possible'''
+ context = None
+ dest_stat = None
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if os.path.exists(b_dest):
+ try:
+ dest_stat = os.stat(b_dest)
+
+ # copy mode and ownership
+ os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
+ os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
+
+ # try to copy flags if possible
+ if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
+ try:
+ os.chflags(b_src, dest_stat.st_flags)
+ except OSError as e:
+ for err in 'EOPNOTSUPP', 'ENOTSUP':
+ if hasattr(errno, err) and e.errno == getattr(errno, err):
+ break
+ else:
+ raise
+ except OSError as e:
+ if e.errno != errno.EPERM:
+ raise
+ if self.selinux_enabled():
+ context = self.selinux_context(dest)
+ else:
+ if self.selinux_enabled():
+ context = self.selinux_default_context(dest)
+
+ creating = not os.path.exists(b_dest)
+
+ try:
+ # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
+ os.rename(b_src, b_dest)
+ except (IOError, OSError) as e:
+ if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
+                # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied),
+                # 16 (device or resource busy) and 26 (text file busy), the latter of which happens on vagrant
+                # synced folders and other 'exotic' non posix file systems
+ self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)), exception=traceback.format_exc())
+ else:
+ # Use bytes here. In the shippable CI, this fails with
+ # a UnicodeError with surrogateescape'd strings for an unknown
+ # reason (doesn't happen in a local Ubuntu16.04 VM)
+ b_dest_dir = os.path.dirname(b_dest)
+ b_suffix = os.path.basename(b_dest)
+ error_msg = None
+ tmp_dest_name = None
+ try:
+ tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp', dir=b_dest_dir, suffix=b_suffix)
+ except (OSError, IOError) as e:
+ error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
+ except TypeError:
+ # We expect that this is happening because python3.4.x and
+ # below can't handle byte strings in mkstemp().
+ # Traceback would end in something like:
+ # file = _os.path.join(dir, pre + name + suf)
+ # TypeError: can't concat bytes to str
+                    error_msg = ('Failed creating tmp file for atomic move. This usually happens when using a Python3 version older than 3.5. '
+                                 'Please use Python2.x or Python3.5 or greater.')
+ finally:
+ if error_msg:
+ if unsafe_writes:
+ self._unsafe_writes(b_src, b_dest)
+ else:
+ self.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+ if tmp_dest_name:
+ b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
+
+ try:
+ try:
+ # close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
+ os.close(tmp_dest_fd)
+ # leaves tmp file behind when sudo and not root
+ try:
+ shutil.move(b_src, b_tmp_dest_name)
+ except OSError:
+ # cleanup will happen by 'rm' of tmpdir
+ # copy2 will preserve some metadata
+ shutil.copy2(b_src, b_tmp_dest_name)
+
+ if self.selinux_enabled():
+ self.set_context_if_different(
+ b_tmp_dest_name, context, False)
+ try:
+ tmp_stat = os.stat(b_tmp_dest_name)
+ if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
+ os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
+ except OSError as e:
+ if e.errno != errno.EPERM:
+ raise
+ try:
+ os.rename(b_tmp_dest_name, b_dest)
+ except (shutil.Error, OSError, IOError) as e:
+ if unsafe_writes and e.errno == errno.EBUSY:
+ self._unsafe_writes(b_tmp_dest_name, b_dest)
+ else:
+                            self.fail_json(msg='Unable to make %s into %s, failed final rename from %s: %s' %
+ (src, dest, b_tmp_dest_name, to_native(e)), exception=traceback.format_exc())
+ except (shutil.Error, OSError, IOError) as e:
+ if unsafe_writes:
+ self._unsafe_writes(b_src, b_dest)
+ else:
+ self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)), exception=traceback.format_exc())
+ finally:
+ self.cleanup(b_tmp_dest_name)
+
+ if creating:
+ # make sure the file has the correct permissions
+ # based on the current value of umask
+ umask = os.umask(0)
+ os.umask(umask)
+ os.chmod(b_dest, DEFAULT_PERM & ~umask)
+ try:
+ os.chown(b_dest, os.geteuid(), os.getegid())
+ except OSError:
+                # We're okay with trying our best here. If the user is not
+                # root (or on old Unices), they won't be able to chown.
+ pass
+
+ if self.selinux_enabled():
+ # rename might not preserve context
+ self.set_context_if_different(dest, context, False)
+
+ def _unsafe_writes(self, src, dest):
+        # Sadly there are some situations where we cannot ensure atomicity.
+        # Only if the user insists and we get the appropriate error do we
+        # update the file unsafely.
+ try:
+ out_dest = in_src = None
+ try:
+ out_dest = open(dest, 'wb')
+ in_src = open(src, 'rb')
+ shutil.copyfileobj(in_src, out_dest)
+ finally: # assuring closed files in 2.4 compatible way
+ if out_dest:
+ out_dest.close()
+ if in_src:
+ in_src.close()
+ except (shutil.Error, OSError, IOError) as e:
+ self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
+ exception=traceback.format_exc())
+
+ def _clean_args(self, args):
+
+ if not self._clean:
+ # create a printable version of the command for use in reporting later,
+ # which strips out things like passwords from the args list
+ to_clean_args = args
+ if PY2:
+ if isinstance(args, text_type):
+ to_clean_args = to_bytes(args)
+ else:
+ if isinstance(args, binary_type):
+ to_clean_args = to_text(args)
+ if isinstance(args, (text_type, binary_type)):
+ to_clean_args = shlex.split(to_clean_args)
+
+ clean_args = []
+ is_passwd = False
+ for arg in (to_native(a) for a in to_clean_args):
+ if is_passwd:
+ is_passwd = False
+ clean_args.append('********')
+ continue
+ if PASSWD_ARG_RE.match(arg):
+ sep_idx = arg.find('=')
+ if sep_idx > -1:
+ clean_args.append('%s=********' % arg[:sep_idx])
+ continue
+ else:
+ is_passwd = True
+ arg = heuristic_log_sanitize(arg, self.no_log_values)
+ clean_args.append(arg)
+ self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
+
+ return self._clean
+
+ def _restore_signal_handlers(self):
+ # Reset SIGPIPE to SIG_DFL, otherwise in Python2.7 it gets ignored in subprocesses.
+ if PY2 and sys.platform != 'win32':
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+ def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
+ use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict',
+ expand_user_and_vars=True, pass_fds=None, before_communicate_callback=None, ignore_invalid_cwd=True, handle_exceptions=True):
+ '''
+ Execute a command, returns rc, stdout, and stderr.
+
+ :arg args: is the command to run
+ * If args is a list, the command will be run with shell=False.
+ * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
+ * If args is a string and use_unsafe_shell=True it runs with shell=True.
+ :kw check_rc: Whether to call fail_json in case of non zero RC.
+ Default False
+ :kw close_fds: See documentation for subprocess.Popen(). Default True
+ :kw executable: See documentation for subprocess.Popen(). Default None
+ :kw data: If given, information to write to the stdin of the command
+ :kw binary_data: If False, append a newline to the data. Default False
+ :kw path_prefix: If given, additional path to find the command in.
+ This adds to the PATH environment variable so helper commands in
+ the same directory can also be found
+ :kw cwd: If given, working directory to run the command inside
+ :kw use_unsafe_shell: See `args` parameter. Default False
+ :kw prompt_regex: Regex string (not a compiled regex) which can be
+ used to detect prompts in the stdout which would otherwise cause
+ the execution to hang (especially if no input data is specified)
+ :kw environ_update: dictionary to *update* environ variables with
+ :kw umask: Umask to be used when running the command. Default None
+ :kw encoding: Since we return native strings, on python3 we need to
+ know the encoding to use to transform from bytes to text. If you
+ want to always get bytes back, use encoding=None. The default is
+ "utf-8". This does not affect transformation of strings given as
+ args.
+ :kw errors: Since we return native strings, on python3 we need to
+ transform stdout and stderr from bytes to text. If the bytes are
+ undecodable in the ``encoding`` specified, then use this error
+ handler to deal with them. The default is ``surrogate_or_strict``
+ which means that the bytes will be decoded using the
+ surrogateescape error handler if available (available on all
+ python3 versions we support) otherwise a UnicodeError traceback
+ will be raised. This does not affect transformations of strings
+ given as args.
+ :kw expand_user_and_vars: When ``use_unsafe_shell=False`` this argument
+ dictates whether ``~`` is expanded in paths and environment variables
+ are expanded before running the command. When ``True`` a string such as
+ ``$SHELL`` will be expanded regardless of escaping. When ``False`` and
+ ``use_unsafe_shell=False`` no path or variable expansion will be done.
+ :kw pass_fds: When running on Python 3 this argument
+ dictates which file descriptors should be passed
+ to an underlying ``Popen`` constructor. On Python 2, this will
+ set ``close_fds`` to False.
+        :kw before_communicate_callback: This function will be called
+            after the ``Popen`` object is created
+            but before communicating with the process.
+            (The ``Popen`` object will be passed to the callback as its first argument)
+ :kw ignore_invalid_cwd: This flag indicates whether an invalid ``cwd``
+ (non-existent or not a directory) should be ignored or should raise
+ an exception.
+        :kw handle_exceptions: This flag indicates whether an exception will
+            be handled inline (calling fail_json) or whether the caller should
+            handle it.
+ :returns: A 3-tuple of return code (integer), stdout (native string),
+ and stderr (native string). On python2, stdout and stderr are both
+ byte strings. On python3, stdout and stderr are text strings converted
+ according to the encoding and errors parameters. If you want byte
+ strings on python3, use encoding=None to turn decoding to text off.
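+
+        :Example (illustrative sketch; assumes ``module`` is an AnsibleModule
+            instance):
+
+        .. code-block:: python
+
+            rc, out, err = module.run_command(['/usr/bin/id', '-un'], check_rc=True)
+            module.debug('running as: %s' % out.strip())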
+ '''
+ # used by clean args later on
+ self._clean = None
+
+ if not isinstance(args, (list, binary_type, text_type)):
+ msg = "Argument 'args' to run_command must be list or string"
+ self.fail_json(rc=257, cmd=args, msg=msg)
+
+ shell = False
+ if use_unsafe_shell:
+
+ # stringify args for unsafe/direct shell usage
+ if isinstance(args, list):
+ args = b" ".join([to_bytes(shlex_quote(x), errors='surrogate_or_strict') for x in args])
+ else:
+ args = to_bytes(args, errors='surrogate_or_strict')
+
+ # not set explicitly, check if set by controller
+ if executable:
+ executable = to_bytes(executable, errors='surrogate_or_strict')
+ args = [executable, b'-c', args]
+ elif self._shell not in (None, '/bin/sh'):
+ args = [to_bytes(self._shell, errors='surrogate_or_strict'), b'-c', args]
+ else:
+ shell = True
+ else:
+ # ensure args are a list
+ if isinstance(args, (binary_type, text_type)):
+ # On python2.6 and below, shlex has problems with text type
+ # On python3, shlex needs a text type.
+ if PY2:
+ args = to_bytes(args, errors='surrogate_or_strict')
+ elif PY3:
+ args = to_text(args, errors='surrogateescape')
+ args = shlex.split(args)
+
+ # expand ``~`` in paths, and all environment vars
+ if expand_user_and_vars:
+ args = [to_bytes(os.path.expanduser(os.path.expandvars(x)), errors='surrogate_or_strict') for x in args if x is not None]
+ else:
+ args = [to_bytes(x, errors='surrogate_or_strict') for x in args if x is not None]
+
+ prompt_re = None
+ if prompt_regex:
+ if isinstance(prompt_regex, text_type):
+ if PY3:
+ prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
+ elif PY2:
+ prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
+ try:
+ prompt_re = re.compile(prompt_regex, re.MULTILINE)
+ except re.error:
+ self.fail_json(msg="invalid prompt regular expression given to run_command")
+
+ rc = 0
+ msg = None
+ st_in = None
+
+ env = os.environ.copy()
+ # We can set this from both an attribute and per call
+ env.update(self.run_command_environ_update or {})
+ env.update(environ_update or {})
+ if path_prefix:
+ path = env.get('PATH', '')
+ if path:
+ env['PATH'] = "%s:%s" % (path_prefix, path)
+ else:
+ env['PATH'] = path_prefix
+
+ # If using test-module.py and explode, the remote lib path will resemble:
+ # /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
+ # If using ansible or ansible-playbook with a remote system:
+ # /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
+
+ # Clean out python paths set by ansiballz
+ if 'PYTHONPATH' in env:
+ pypaths = [x for x in env['PYTHONPATH'].split(':')
+ if x and
+ not x.endswith('/ansible_modlib.zip') and
+ not x.endswith('/debug_dir')]
+ if pypaths and any(pypaths):
+ env['PYTHONPATH'] = ':'.join(pypaths)
+
+ if data:
+ st_in = subprocess.PIPE
+
+ def preexec():
+ self._restore_signal_handlers()
+ if umask:
+ os.umask(umask)
+
+ kwargs = dict(
+ executable=executable,
+ shell=shell,
+ close_fds=close_fds,
+ stdin=st_in,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ preexec_fn=preexec,
+ env=env,
+ )
+ if PY3 and pass_fds:
+ kwargs["pass_fds"] = pass_fds
+ elif PY2 and pass_fds:
+ kwargs['close_fds'] = False
+
+ # make sure we're in the right working directory
+ if cwd:
+ cwd = to_bytes(os.path.abspath(os.path.expanduser(cwd)), errors='surrogate_or_strict')
+ if os.path.isdir(cwd):
+ kwargs['cwd'] = cwd
+ elif not ignore_invalid_cwd:
+ self.fail_json(msg="Provided cwd is not a valid directory: %s" % cwd)
+
+ try:
+ if self._debug:
+ self.log('Executing: ' + self._clean_args(args))
+ cmd = subprocess.Popen(args, **kwargs)
+ if before_communicate_callback:
+ before_communicate_callback(cmd)
+
+ # the communication logic here is essentially taken from that
+ # of the _communicate() function in ssh.py
+
+ stdout = b''
+ stderr = b''
+ try:
+ selector = selectors.DefaultSelector()
+ except (IOError, OSError):
+ # Failed to detect default selector for the given platform
+ # Select PollSelector which is supported by major platforms
+ selector = selectors.PollSelector()
+
+ selector.register(cmd.stdout, selectors.EVENT_READ)
+ selector.register(cmd.stderr, selectors.EVENT_READ)
+ if os.name == 'posix':
+ fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ if data:
+ if not binary_data:
+ data += '\n'
+ if isinstance(data, text_type):
+ data = to_bytes(data)
+ cmd.stdin.write(data)
+ cmd.stdin.close()
+
+ while True:
+ events = selector.select(1)
+ for key, event in events:
+ b_chunk = key.fileobj.read()
+ if b_chunk == b(''):
+ selector.unregister(key.fileobj)
+ if key.fileobj == cmd.stdout:
+ stdout += b_chunk
+ elif key.fileobj == cmd.stderr:
+ stderr += b_chunk
+ # if we're checking for prompts, do it now
+ if prompt_re:
+ if prompt_re.search(stdout) and not data:
+ if encoding:
+ stdout = to_native(stdout, encoding=encoding, errors=errors)
+ return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
+ # only break out if no pipes are left to read or
+ # the pipes are completely read and
+ # the process is terminated
+ if (not events or not selector.get_map()) and cmd.poll() is not None:
+ break
+ # No pipes are left to read but process is not yet terminated
+ # Only then it is safe to wait for the process to be finished
+ # NOTE: Actually cmd.poll() is always None here if no selectors are left
+ elif not selector.get_map() and cmd.poll() is None:
+ cmd.wait()
+ # The process is terminated. Since no pipes to read from are
+ # left, there is no need to call select() again.
+ break
+
+ cmd.stdout.close()
+ cmd.stderr.close()
+ selector.close()
+
+ rc = cmd.returncode
+ except (OSError, IOError) as e:
+ self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
+ if handle_exceptions:
+ self.fail_json(rc=e.errno, stdout=b'', stderr=b'', msg=to_native(e), cmd=self._clean_args(args))
+ else:
+ raise e
+ except Exception as e:
+ self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
+ if handle_exceptions:
+ self.fail_json(rc=257, stdout=b'', stderr=b'', msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
+ else:
+ raise e
+
+ if rc != 0 and check_rc:
+ msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
+ self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
+
+ if encoding is not None:
+ return (rc, to_native(stdout, encoding=encoding, errors=errors),
+ to_native(stderr, encoding=encoding, errors=errors))
+
+ return (rc, stdout, stderr)
+
+ def append_to_file(self, filename, str):
+ filename = os.path.expandvars(os.path.expanduser(filename))
+ fh = open(filename, 'a')
+ fh.write(str)
+ fh.close()
+
+ def bytes_to_human(self, size):
+ return bytes_to_human(size)
+
+ # for backwards compatibility
+ pretty_bytes = bytes_to_human
+
+ def human_to_bytes(self, number, isbits=False):
+ return human_to_bytes(number, isbits)
+
+ #
+ # Backwards compat
+ #
+
+ # In 2.0, moved from inside the module to the toplevel
+ is_executable = is_executable
+
+ @staticmethod
+ def get_buffer_size(fd):
+ try:
+            # 1032 == F_GETPIPE_SZ
+ buffer_size = fcntl.fcntl(fd, 1032)
+ except Exception:
+ try:
+ # not as exact as above, but should be good enough for most platforms that fail the previous call
+ buffer_size = select.PIPE_BUF
+ except Exception:
+                buffer_size = 9000  # use a sane default just in case
+
+ return buffer_size
+
+
+def get_module_path():
+ return os.path.dirname(os.path.realpath(__file__))
diff --git a/lib/ansible/module_utils/common/__init__.py b/lib/ansible/module_utils/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/common/__init__.py
diff --git a/lib/ansible/module_utils/common/_collections_compat.py b/lib/ansible/module_utils/common/_collections_compat.py
new file mode 100644
index 0000000..3412408
--- /dev/null
+++ b/lib/ansible/module_utils/common/_collections_compat.py
@@ -0,0 +1,46 @@
+# Copyright (c), Sviatoslav Sydorenko <ssydoren@redhat.com> 2018
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+"""Collections ABC import shim.
+
+This module is intended only for internal use.
+It will go away once the bundled copy of six includes equivalent functionality.
+Third parties should not use this.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+try:
+ """Python 3.3+ branch."""
+ from collections.abc import (
+ MappingView,
+ ItemsView,
+ KeysView,
+ ValuesView,
+ Mapping, MutableMapping,
+ Sequence, MutableSequence,
+ Set, MutableSet,
+ Container,
+ Hashable,
+ Sized,
+ Callable,
+ Iterable,
+ Iterator,
+ )
+except ImportError:
+ """Use old lib location under 2.6-3.2."""
+ from collections import ( # type: ignore[no-redef,attr-defined] # pylint: disable=deprecated-class
+ MappingView,
+ ItemsView,
+ KeysView,
+ ValuesView,
+ Mapping, MutableMapping,
+ Sequence, MutableSequence,
+ Set, MutableSet,
+ Container,
+ Hashable,
+ Sized,
+ Callable,
+ Iterable,
+ Iterator,
+ )
diff --git a/lib/ansible/module_utils/common/_json_compat.py b/lib/ansible/module_utils/common/_json_compat.py
new file mode 100644
index 0000000..787af0f
--- /dev/null
+++ b/lib/ansible/module_utils/common/_json_compat.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import types
+import json
+
+# Detect the python-json library which is incompatible
+try:
+ if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
+ raise ImportError('json.loads or json.dumps were not found in the imported json library.')
+except AttributeError:
+ raise ImportError('python-json was detected, which is incompatible.')
diff --git a/lib/ansible/module_utils/common/_utils.py b/lib/ansible/module_utils/common/_utils.py
new file mode 100644
index 0000000..66df316
--- /dev/null
+++ b/lib/ansible/module_utils/common/_utils.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+"""
+Modules in _utils are waiting to find a better home. If you need to use them, be prepared for them
+to move to a different location in the future.
+"""
+
+
+def get_all_subclasses(cls):
+ '''
+ Recursively search and find all subclasses of a given class
+
+ :arg cls: A python class
+ :rtype: set
+ :returns: The set of python classes which are the subclasses of `cls`.
+
+ In python, you can use a class's :py:meth:`__subclasses__` method to determine what subclasses
+    of a class exist. However, `__subclasses__` only goes one level deep. This function searches
+    each child class's `__subclasses__` method to find all of the descendant classes. It then
+    returns an iterable of the descendant classes.
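+
+    :Example (illustrative):
+
+    .. code-block:: python
+
+        class Base:
+            pass
+
+        class Child(Base):
+            pass
+
+        class GrandChild(Child):
+            pass
+
+        assert get_all_subclasses(Base) == {Child, GrandChild}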
+ '''
+ # Retrieve direct subclasses
+ subclasses = set(cls.__subclasses__())
+ to_visit = list(subclasses)
+ # Then visit all subclasses
+ while to_visit:
+ for sc in to_visit:
+ # The current class is now visited, so remove it from list
+ to_visit.remove(sc)
+ # Appending all subclasses to visit and keep a reference of available class
+ for ssc in sc.__subclasses__():
+ if ssc not in subclasses:
+ to_visit.append(ssc)
+ subclasses.add(ssc)
+ return subclasses
diff --git a/lib/ansible/module_utils/common/arg_spec.py b/lib/ansible/module_utils/common/arg_spec.py
new file mode 100644
index 0000000..d9f716e
--- /dev/null
+++ b/lib/ansible/module_utils/common/arg_spec.py
@@ -0,0 +1,311 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible.module_utils.common.parameters import (
+ _ADDITIONAL_CHECKS,
+ _get_legal_inputs,
+ _get_unsupported_parameters,
+ _handle_aliases,
+ _list_deprecations,
+ _list_no_log_values,
+ _set_defaults,
+ _validate_argument_types,
+ _validate_argument_values,
+ _validate_sub_spec,
+ set_fallbacks,
+)
+
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common.warnings import deprecate, warn
+
+from ansible.module_utils.common.validation import (
+ check_mutually_exclusive,
+ check_required_arguments,
+)
+
+from ansible.module_utils.errors import (
+ AliasError,
+ AnsibleValidationErrorMultiple,
+ DeprecationError,
+ MutuallyExclusiveError,
+ NoLogError,
+ RequiredDefaultError,
+ RequiredError,
+ UnsupportedError,
+)
+
+
+class ValidationResult:
+ """Result of argument spec validation.
+
+ This is the object returned by :func:`ArgumentSpecValidator.validate()
+ <ansible.module_utils.common.arg_spec.ArgumentSpecValidator.validate()>`
+ containing the validated parameters and any errors.
+ """
+
+ def __init__(self, parameters):
+ """
+ :arg parameters: Terms to be validated and coerced to the correct type.
+ :type parameters: dict
+ """
+ self._no_log_values = set()
+ """:class:`set` of values marked as ``no_log`` in the argument spec. This
+ is a temporary holding place for these values and may move in the future.
+ """
+
+ self._unsupported_parameters = set()
+ self._supported_parameters = dict()
+ self._validated_parameters = deepcopy(parameters)
+ self._deprecations = []
+ self._warnings = []
+ self._aliases = {}
+ self.errors = AnsibleValidationErrorMultiple()
+ """
+ :class:`~ansible.module_utils.errors.AnsibleValidationErrorMultiple` containing all
+ :class:`~ansible.module_utils.errors.AnsibleValidationError` objects if there were
+ any failures during validation.
+ """
+
+ @property
+ def validated_parameters(self):
+ """Validated and coerced parameters."""
+ return self._validated_parameters
+
+ @property
+ def unsupported_parameters(self):
+ """:class:`set` of unsupported parameter names."""
+ return self._unsupported_parameters
+
+ @property
+ def error_messages(self):
+ """:class:`list` of all error messages from each exception in :attr:`errors`."""
+ return self.errors.messages
+
+
+class ArgumentSpecValidator:
+ """Argument spec validation class
+
+ Creates a validator based on the ``argument_spec`` that can be used to
+ validate a number of parameters using the :meth:`validate` method.
+ """
+
+ def __init__(self, argument_spec,
+ mutually_exclusive=None,
+ required_together=None,
+ required_one_of=None,
+ required_if=None,
+ required_by=None,
+ ):
+
+ """
+ :arg argument_spec: Specification of valid parameters and their type. May
+ include nested argument specs.
+ :type argument_spec: dict[str, dict]
+
+ :kwarg mutually_exclusive: List or list of lists of terms that should not
+ be provided together.
+ :type mutually_exclusive: list[str] or list[list[str]]
+
+ :kwarg required_together: List of lists of terms that are required together.
+ :type required_together: list[list[str]]
+
+ :kwarg required_one_of: List of lists of terms, one of which in each list
+ is required.
+ :type required_one_of: list[list[str]]
+
+ :kwarg required_if: List of lists of ``[parameter, value, [parameters]]`` where
+ one of ``[parameters]`` is required if ``parameter == value``.
+ :type required_if: list
+
+ :kwarg required_by: Dictionary of parameter names that contain a list of
+ parameters required by each key in the dictionary.
+ :type required_by: dict[str, list[str]]
+ """
+
+ self._mutually_exclusive = mutually_exclusive
+ self._required_together = required_together
+ self._required_one_of = required_one_of
+ self._required_if = required_if
+ self._required_by = required_by
+ self._valid_parameter_names = set()
+ self.argument_spec = argument_spec
+
+ for key in sorted(self.argument_spec.keys()):
+ aliases = self.argument_spec[key].get('aliases')
+ if aliases:
+ self._valid_parameter_names.update(["{key} ({aliases})".format(key=key, aliases=", ".join(sorted(aliases)))])
+ else:
+ self._valid_parameter_names.update([key])
+
+ def validate(self, parameters, *args, **kwargs):
+ """Validate ``parameters`` against argument spec.
+
+ Error messages in the :class:`ValidationResult` may contain no_log values and should be
+ sanitized with :func:`~ansible.module_utils.common.parameters.sanitize_keys` before logging or displaying.
+
+ :arg parameters: Parameters to validate against the argument spec
+ :type parameters: dict[str, dict]
+
+ :return: :class:`ValidationResult` containing validated parameters.
+
+ :Simple Example:
+
+ .. code-block:: text
+
+ argument_spec = {
+ 'name': {'type': 'str'},
+ 'age': {'type': 'int'},
+ }
+
+ parameters = {
+ 'name': 'bo',
+ 'age': '42',
+ }
+
+ validator = ArgumentSpecValidator(argument_spec)
+ result = validator.validate(parameters)
+
+ if result.error_messages:
+ sys.exit("Validation failed: {0}".format(", ".join(result.error_messages))
+
+ valid_params = result.validated_parameters
+ """
+
+ result = ValidationResult(parameters)
+
+ result._no_log_values.update(set_fallbacks(self.argument_spec, result._validated_parameters))
+
+ alias_warnings = []
+ alias_deprecations = []
+ try:
+ result._aliases.update(_handle_aliases(self.argument_spec, result._validated_parameters, alias_warnings, alias_deprecations))
+ except (TypeError, ValueError) as e:
+ result.errors.append(AliasError(to_native(e)))
+
+ legal_inputs = _get_legal_inputs(self.argument_spec, result._validated_parameters, result._aliases)
+
+ for option, alias in alias_warnings:
+ result._warnings.append({'option': option, 'alias': alias})
+
+ for deprecation in alias_deprecations:
+ result._deprecations.append({
+ 'msg': "Alias '%s' is deprecated. See the module docs for more information" % deprecation['name'],
+ 'version': deprecation.get('version'),
+ 'date': deprecation.get('date'),
+ 'collection_name': deprecation.get('collection_name'),
+ })
+
+ try:
+ result._no_log_values.update(_list_no_log_values(self.argument_spec, result._validated_parameters))
+ except TypeError as te:
+ result.errors.append(NoLogError(to_native(te)))
+
+ try:
+ result._deprecations.extend(_list_deprecations(self.argument_spec, result._validated_parameters))
+ except TypeError as te:
+ result.errors.append(DeprecationError(to_native(te)))
+
+ try:
+ result._unsupported_parameters.update(
+ _get_unsupported_parameters(
+ self.argument_spec,
+ result._validated_parameters,
+ legal_inputs,
+ store_supported=result._supported_parameters,
+ )
+ )
+ except TypeError as te:
+ result.errors.append(RequiredDefaultError(to_native(te)))
+ except ValueError as ve:
+ result.errors.append(AliasError(to_native(ve)))
+
+ try:
+ check_mutually_exclusive(self._mutually_exclusive, result._validated_parameters)
+ except TypeError as te:
+ result.errors.append(MutuallyExclusiveError(to_native(te)))
+
+ result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters, False))
+
+ try:
+ check_required_arguments(self.argument_spec, result._validated_parameters)
+ except TypeError as e:
+ result.errors.append(RequiredError(to_native(e)))
+
+ _validate_argument_types(self.argument_spec, result._validated_parameters, errors=result.errors)
+ _validate_argument_values(self.argument_spec, result._validated_parameters, errors=result.errors)
+
+ for check in _ADDITIONAL_CHECKS:
+ try:
+ check['func'](getattr(self, "_{attr}".format(attr=check['attr'])), result._validated_parameters)
+ except TypeError as te:
+ result.errors.append(check['err'](to_native(te)))
+
+ result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters))
+
+ alias_deprecations = []
+ _validate_sub_spec(self.argument_spec, result._validated_parameters,
+ errors=result.errors,
+ no_log_values=result._no_log_values,
+ unsupported_parameters=result._unsupported_parameters,
+ supported_parameters=result._supported_parameters,
+ alias_deprecations=alias_deprecations,)
+ for deprecation in alias_deprecations:
+ result._deprecations.append({
+ 'msg': "Alias '%s' is deprecated. See the module docs for more information" % deprecation['name'],
+ 'version': deprecation.get('version'),
+ 'date': deprecation.get('date'),
+ 'collection_name': deprecation.get('collection_name'),
+ })
+
+ if result._unsupported_parameters:
+ flattened_names = []
+ for item in result._unsupported_parameters:
+ if isinstance(item, tuple):
+ flattened_names.append(".".join(item))
+ else:
+ flattened_names.append(item)
+
+ unsupported_string = ", ".join(sorted(list(flattened_names)))
+ supported_params = supported_aliases = []
+ if result._supported_parameters.get(item):
+ supported_params = sorted(list(result._supported_parameters[item][0]))
+ supported_aliases = sorted(list(result._supported_parameters[item][1]))
+ supported_string = ", ".join(supported_params)
+ if supported_aliases:
+ aliases_string = ", ".join(supported_aliases)
+ supported_string += " (%s)" % aliases_string
+
+ msg = "{0}. Supported parameters include: {1}.".format(unsupported_string, supported_string)
+ result.errors.append(UnsupportedError(msg))
+
+ return result
+
+
+class ModuleArgumentSpecValidator(ArgumentSpecValidator):
+ """Argument spec validation class used by :class:`AnsibleModule`.
+
+ This is not meant to be used outside of :class:`AnsibleModule`. Use
+ :class:`ArgumentSpecValidator` instead.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(ModuleArgumentSpecValidator, self).__init__(*args, **kwargs)
+
+ def validate(self, parameters):
+ result = super(ModuleArgumentSpecValidator, self).validate(parameters)
+
+ for d in result._deprecations:
+ deprecate(d['msg'],
+ version=d.get('version'), date=d.get('date'),
+ collection_name=d.get('collection_name'))
+
+ for w in result._warnings:
+ warn('Both option {option} and its alias {alias} are set.'.format(option=w['option'], alias=w['alias']))
+
+ return result
diff --git a/lib/ansible/module_utils/common/collections.py b/lib/ansible/module_utils/common/collections.py
new file mode 100644
index 0000000..fdb9108
--- /dev/null
+++ b/lib/ansible/module_utils/common/collections.py
@@ -0,0 +1,112 @@
+# Copyright: (c) 2018, Sviatoslav Sydorenko <ssydoren@redhat.com>
+# Copyright: (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+"""Collection of low-level utility functions."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils.common._collections_compat import Hashable, Mapping, MutableMapping, Sequence
+
+
+class ImmutableDict(Hashable, Mapping):
+ """Dictionary that cannot be updated"""
+ def __init__(self, *args, **kwargs):
+ self._store = dict(*args, **kwargs)
+
+ def __getitem__(self, key):
+ return self._store[key]
+
+ def __iter__(self):
+ return self._store.__iter__()
+
+ def __len__(self):
+ return self._store.__len__()
+
+ def __hash__(self):
+ return hash(frozenset(self.items()))
+
+ def __eq__(self, other):
+ try:
+ if self.__hash__() == hash(other):
+ return True
+ except TypeError:
+ pass
+
+ return False
+
+ def __repr__(self):
+ return 'ImmutableDict({0})'.format(repr(self._store))
+
+ def union(self, overriding_mapping):
+ """
+ Create an ImmutableDict as a combination of the original and overriding_mapping
+
+ :arg overriding_mapping: A Mapping of replacement and additional items
+ :return: A copy of the ImmutableDict with key-value pairs from the overriding_mapping added
+
+ If any of the keys in overriding_mapping are already present in the original ImmutableDict,
+ the overriding_mapping item replaces the one in the original ImmutableDict.
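+
+        Example (illustrative; values are arbitrary)::
+
+            base = ImmutableDict(a=1, b=2)
+            base.union({'b': 3, 'c': 4})  # ImmutableDict({'a': 1, 'b': 3, 'c': 4})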
+ """
+ return ImmutableDict(self._store, **overriding_mapping)
+
+ def difference(self, subtractive_iterable):
+ """
+ Create an ImmutableDict as a combination of the original minus keys in subtractive_iterable
+
+ :arg subtractive_iterable: Any iterable containing keys that should not be present in the
+ new ImmutableDict
+ :return: A copy of the ImmutableDict with keys from the subtractive_iterable removed
+ """
+ remove_keys = frozenset(subtractive_iterable)
+ keys = (k for k in self._store.keys() if k not in remove_keys)
+ return ImmutableDict((k, self._store[k]) for k in keys)
+
+
+def is_string(seq):
+ """Identify whether the input has a string-like type (inclding bytes)."""
+ # AnsibleVaultEncryptedUnicode inherits from Sequence, but is expected to be a string like object
+ return isinstance(seq, (text_type, binary_type)) or getattr(seq, '__ENCRYPTED__', False)
+
+
+def is_iterable(seq, include_strings=False):
+ """Identify whether the input is an iterable."""
+ if not include_strings and is_string(seq):
+ return False
+
+ try:
+ iter(seq)
+ return True
+ except TypeError:
+ return False
+
+
+def is_sequence(seq, include_strings=False):
+ """Identify whether the input is a sequence.
+
+ Strings and bytes are not sequences here,
+    unless ``include_strings`` is ``True``.
+
+ Non-indexable things are never of a sequence type.
+ """
+ if not include_strings and is_string(seq):
+ return False
+
+ return isinstance(seq, Sequence)
+
+
+def count(seq):
+ """Returns a dictionary with the number of appearances of each element of the iterable.
+
+ Resembles the collections.Counter class functionality. It is meant to be used when the
+ code is run on Python 2.6.* where collections.Counter is not available. It should be
+ deprecated and replaced when support for Python < 2.7 is dropped.
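+
+    Example (illustrative)::
+
+        count(['a', 'b', 'a'])  # {'a': 2, 'b': 1}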
+ """
+ if not is_iterable(seq):
+ raise Exception('Argument provided is not an iterable')
+ counters = dict()
+ for elem in seq:
+ counters[elem] = counters.get(elem, 0) + 1
+ return counters
diff --git a/lib/ansible/module_utils/common/dict_transformations.py b/lib/ansible/module_utils/common/dict_transformations.py
new file mode 100644
index 0000000..ffd0645
--- /dev/null
+++ b/lib/ansible/module_utils/common/dict_transformations.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import re
+from copy import deepcopy
+
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
+def camel_dict_to_snake_dict(camel_dict, reversible=False, ignore_list=()):
+ """
+ reversible allows two way conversion of a camelized dict
+ such that snake_dict_to_camel_dict(camel_dict_to_snake_dict(x)) == x
+
+ This is achieved through mapping e.g. HTTPEndpoint to h_t_t_p_endpoint
+ where the default would be simply http_endpoint, which gets turned into
+ HttpEndpoint if recamelized.
+
+ ignore_list is used to avoid converting a sub-tree of a dict. This is
+ particularly important for tags, where keys are case-sensitive. We convert
+ the 'Tags' key but nothing below.
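+
+    Example (illustrative; keys are arbitrary)::
+
+        camel_dict_to_snake_dict({'HttpEndpoint': {'PortNumber': 80}})
+        # -> {'http_endpoint': {'port_number': 80}}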
+ """
+
+ def value_is_list(camel_list):
+
+ checked_list = []
+ for item in camel_list:
+ if isinstance(item, dict):
+ checked_list.append(camel_dict_to_snake_dict(item, reversible))
+ elif isinstance(item, list):
+ checked_list.append(value_is_list(item))
+ else:
+ checked_list.append(item)
+
+ return checked_list
+
+ snake_dict = {}
+ for k, v in camel_dict.items():
+ if isinstance(v, dict) and k not in ignore_list:
+ snake_dict[_camel_to_snake(k, reversible=reversible)] = camel_dict_to_snake_dict(v, reversible)
+ elif isinstance(v, list) and k not in ignore_list:
+ snake_dict[_camel_to_snake(k, reversible=reversible)] = value_is_list(v)
+ else:
+ snake_dict[_camel_to_snake(k, reversible=reversible)] = v
+
+ return snake_dict
+
+
+def snake_dict_to_camel_dict(snake_dict, capitalize_first=False):
+ """
+ Perhaps unexpectedly, snake_dict_to_camel_dict returns dromedaryCase
+ rather than true CamelCase. Passing capitalize_first=True returns
+    CamelCase. The default remains False as that was the original implementation.
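+
+    Example (illustrative)::
+
+        snake_dict_to_camel_dict({'http_endpoint': 80})        # {'httpEndpoint': 80}
+        snake_dict_to_camel_dict({'http_endpoint': 80}, True)  # {'HttpEndpoint': 80}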
+ """
+
+ def camelize(complex_type, capitalize_first=False):
+ if complex_type is None:
+ return
+ new_type = type(complex_type)()
+ if isinstance(complex_type, dict):
+ for key in complex_type:
+ new_type[_snake_to_camel(key, capitalize_first)] = camelize(complex_type[key], capitalize_first)
+ elif isinstance(complex_type, list):
+ for i in range(len(complex_type)):
+ new_type.append(camelize(complex_type[i], capitalize_first))
+ else:
+ return complex_type
+ return new_type
+
+ return camelize(snake_dict, capitalize_first)
+
+
+def _snake_to_camel(snake, capitalize_first=False):
+ if capitalize_first:
+ return ''.join(x.capitalize() or '_' for x in snake.split('_'))
+ else:
+ return snake.split('_')[0] + ''.join(x.capitalize() or '_' for x in snake.split('_')[1:])
+
+
+def _camel_to_snake(name, reversible=False):
+
+ def prepend_underscore_and_lower(m):
+ return '_' + m.group(0).lower()
+
+ if reversible:
+ upper_pattern = r'[A-Z]'
+ else:
+ # Cope with pluralized abbreviations such as TargetGroupARNs
+ # that would otherwise be rendered target_group_ar_ns
+ upper_pattern = r'[A-Z]{3,}s$'
+
+ s1 = re.sub(upper_pattern, prepend_underscore_and_lower, name)
+    # Handle when there was nothing before the upper_pattern match
+ if s1.startswith("_") and not name.startswith("_"):
+ s1 = s1[1:]
+ if reversible:
+ return s1
+
+ # Remainder of solution seems to be https://stackoverflow.com/a/1176023
+ first_cap_pattern = r'(.)([A-Z][a-z]+)'
+ all_cap_pattern = r'([a-z0-9])([A-Z]+)'
+ s2 = re.sub(first_cap_pattern, r'\1_\2', s1)
+ return re.sub(all_cap_pattern, r'\1_\2', s2).lower()
+
+
+def dict_merge(a, b):
+    '''Recursively merge dicts. This is not just a simple a['key'] = b['key']:
+    if both a and b have a key whose value is a dict, then dict_merge is called
+    on both values and the result is stored in the returned dictionary.
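+
+    Example (illustrative)::
+
+        dict_merge({'a': {'x': 1}, 'b': 1}, {'a': {'y': 2}})
+        # -> {'a': {'x': 1, 'y': 2}, 'b': 1}
+    '''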
+ if not isinstance(b, dict):
+ return b
+ result = deepcopy(a)
+ for k, v in b.items():
+ if k in result and isinstance(result[k], dict):
+ result[k] = dict_merge(result[k], v)
+ else:
+ result[k] = deepcopy(v)
+ return result
+
+
+def recursive_diff(dict1, dict2):
+ """Recursively diff two dictionaries
+
+ Raises ``TypeError`` for incorrect argument type.
+
+ :arg dict1: Dictionary to compare against.
+ :arg dict2: Dictionary to compare with ``dict1``.
+ :return: Tuple of dictionaries of differences or ``None`` if there are no differences.
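+
+    Example (illustrative)::
+
+        recursive_diff({'a': 1, 'b': 2}, {'a': 1, 'b': 3})
+        # -> ({'b': 2}, {'b': 3})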
+ """
+
+ if not all((isinstance(item, MutableMapping) for item in (dict1, dict2))):
+ raise TypeError("Unable to diff 'dict1' %s and 'dict2' %s. "
+ "Both must be a dictionary." % (type(dict1), type(dict2)))
+
+ left = dict((k, v) for (k, v) in dict1.items() if k not in dict2)
+ right = dict((k, v) for (k, v) in dict2.items() if k not in dict1)
+ for k in (set(dict1.keys()) & set(dict2.keys())):
+ if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
+ result = recursive_diff(dict1[k], dict2[k])
+ if result:
+ left[k] = result[0]
+ right[k] = result[1]
+ elif dict1[k] != dict2[k]:
+ left[k] = dict1[k]
+ right[k] = dict2[k]
+ if left or right:
+ return left, right
+ return None
diff --git a/lib/ansible/module_utils/common/file.py b/lib/ansible/module_utils/common/file.py
new file mode 100644
index 0000000..1e83660
--- /dev/null
+++ b/lib/ansible/module_utils/common/file.py
@@ -0,0 +1,205 @@
+# Copyright (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import os
+import stat
+import re
+import pwd
+import grp
+import time
+import shutil
+import traceback
+import fcntl
+import sys
+
+from contextlib import contextmanager
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import b, binary_type
+from ansible.module_utils.common.warnings import deprecate
+
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ HAVE_SELINUX = False
+
+
+FILE_ATTRIBUTES = {
+ 'A': 'noatime',
+ 'a': 'append',
+ 'c': 'compressed',
+ 'C': 'nocow',
+ 'd': 'nodump',
+ 'D': 'dirsync',
+ 'e': 'extents',
+ 'E': 'encrypted',
+ 'h': 'blocksize',
+ 'i': 'immutable',
+ 'I': 'indexed',
+ 'j': 'journalled',
+ 'N': 'inline',
+ 's': 'zero',
+ 'S': 'synchronous',
+ 't': 'notail',
+ 'T': 'blockroot',
+ 'u': 'undelete',
+ 'X': 'compressedraw',
+ 'Z': 'compresseddirty',
+}
+
+
+# Used for parsing symbolic file perms
+MODE_OPERATOR_RE = re.compile(r'[+=-]')
+USERS_RE = re.compile(r'[^ugo]')
+PERMS_RE = re.compile(r'[^rwxXstugo]')
+
+
+_PERM_BITS = 0o7777 # file mode permission bits
+_EXEC_PERM_BITS = 0o0111 # execute permission bits
+_DEFAULT_PERM = 0o0666 # default file permission bits
+
+
+def is_executable(path):
+ # This function's signature needs to be repeated
+ # as the first line of its docstring.
+ # This method is reused by the basic module,
+ # the repetition helps the basic module's html documentation come out right.
+ # http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_docstring_signature
+ '''is_executable(path)
+
+ is the given path executable?
+
+ :arg path: The path of the file to check.
+
+ Limitations:
+
+ * Does not account for FSACLs.
+ * Most times we really want to know "Can the current user execute this
+ file". This function does not tell us that, only if any execute bit is set.
+ '''
+ # These are all bitfields so first bitwise-or all the permissions we're
+ # looking for, then bitwise-and with the file's mode to determine if any
+ # execute bits are set.
+ return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
+
+
+def format_attributes(attributes):
+ attribute_list = [FILE_ATTRIBUTES.get(attr) for attr in attributes if attr in FILE_ATTRIBUTES]
+ return attribute_list
+
+
+def get_flags_from_attributes(attributes):
+ flags = [key for key, attr in FILE_ATTRIBUTES.items() if attr in attributes]
+ return ''.join(flags)
+
+
+def get_file_arg_spec():
+ arg_spec = dict(
+ mode=dict(type='raw'),
+ owner=dict(),
+ group=dict(),
+ seuser=dict(),
+ serole=dict(),
+ selevel=dict(),
+ setype=dict(),
+ attributes=dict(aliases=['attr']),
+ )
+ return arg_spec
+
+
+class LockTimeout(Exception):
+ pass
+
+
+class FileLock:
+ '''
+    Currently FileLock is implemented via fcntl.flock on a lock file; this
+    behaviour may change in the future. Avoid mixing the lock types
+    fcntl.flock, fcntl.lockf and module_utils.common.file.FileLock, as doing
+    so will certainly cause unwanted and/or unexpected behaviour.
+ '''
+ def __init__(self):
+ deprecate("FileLock is not reliable and has never been used in core for that reason. There is no current alternative that works across POSIX targets",
+ version='2.16')
+ self.lockfd = None
+
+ @contextmanager
+ def lock_file(self, path, tmpdir, lock_timeout=None):
+ '''
+ Context for lock acquisition
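+
+        Example (illustrative sketch; assumes ``module`` is an AnsibleModule
+        instance and the path is hypothetical)::
+
+            lock = FileLock()
+            with lock.lock_file('/etc/example.conf', module.tmpdir, lock_timeout=5):
+                pass  # exclusive access to the path while inside the block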
+ '''
+ try:
+ self.set_lock(path, tmpdir, lock_timeout)
+ yield
+ finally:
+ self.unlock()
+
+ def set_lock(self, path, tmpdir, lock_timeout=None):
+ '''
+ Create a lock file based on path with flock to prevent other processes
+ using given path.
+        Please note that currently file locking only works when it's executed by
+        the same user, i.e. single-user scenarios.
+
+ :kw path: Path (file) to lock
+ :kw tmpdir: Path where to place the temporary .lock file
+ :kw lock_timeout:
+ Wait n seconds for lock acquisition, fail if timeout is reached.
+ 0 = Do not wait, fail if lock cannot be acquired immediately,
+ Default is None, wait indefinitely until lock is released.
+ :returns: True
+ '''
+ lock_path = os.path.join(tmpdir, 'ansible-{0}.lock'.format(os.path.basename(path)))
+ l_wait = 0.1
+ r_exception = IOError
+ if sys.version_info[0] == 3:
+ r_exception = BlockingIOError
+
+ self.lockfd = open(lock_path, 'w')
+
+        # lock_timeout of None means wait indefinitely (handled further below)
+        if lock_timeout is not None and lock_timeout <= 0:
+ fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
+ return True
+
+ if lock_timeout:
+ e_secs = 0
+ while e_secs < lock_timeout:
+ try:
+ fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
+ return True
+ except r_exception:
+ time.sleep(l_wait)
+ e_secs += l_wait
+ continue
+
+ self.lockfd.close()
+ raise LockTimeout('{0} sec'.format(lock_timeout))
+
+ fcntl.flock(self.lockfd, fcntl.LOCK_EX)
+ os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
+
+ return True
+
+ def unlock(self):
+ '''
+ Make sure lock file is available for everyone and Unlock the file descriptor
+ locked by set_lock
+
+ :returns: True
+ '''
+ if not self.lockfd:
+ return True
+
+ try:
+ fcntl.flock(self.lockfd, fcntl.LOCK_UN)
+ self.lockfd.close()
+ except ValueError: # file wasn't opened, let context manager fail gracefully
+ pass
+
+ return True
diff --git a/lib/ansible/module_utils/common/json.py b/lib/ansible/module_utils/common/json.py
new file mode 100644
index 0000000..727083c
--- /dev/null
+++ b/lib/ansible/module_utils/common/json.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import datetime
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.common.collections import is_sequence
+
+
+def _is_unsafe(value):
+ return getattr(value, '__UNSAFE__', False) and not getattr(value, '__ENCRYPTED__', False)
+
+
+def _is_vault(value):
+ return getattr(value, '__ENCRYPTED__', False)
+
+
+def _preprocess_unsafe_encode(value):
+ """Recursively preprocess a data structure converting instances of ``AnsibleUnsafe``
+ into their JSON dict representations
+
+ Used in ``AnsibleJSONEncoder.iterencode``
+ """
+ if _is_unsafe(value):
+ value = {'__ansible_unsafe': to_text(value, errors='surrogate_or_strict', nonstring='strict')}
+ elif is_sequence(value):
+ value = [_preprocess_unsafe_encode(v) for v in value]
+ elif isinstance(value, Mapping):
+ value = dict((k, _preprocess_unsafe_encode(v)) for k, v in value.items())
+
+ return value
+
+
+def json_dump(structure):
+ return json.dumps(structure, cls=AnsibleJSONEncoder, sort_keys=True, indent=4)
+
+
+class AnsibleJSONEncoder(json.JSONEncoder):
+ '''
+ Simple encoder class to deal with JSON encoding of Ansible internal types
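+
+    Example (illustrative)::
+
+        json.dumps({'when': datetime.date(2023, 1, 1)}, cls=AnsibleJSONEncoder)
+        # -> '{"when": "2023-01-01"}'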
+ '''
+
+ def __init__(self, preprocess_unsafe=False, vault_to_text=False, **kwargs):
+ self._preprocess_unsafe = preprocess_unsafe
+ self._vault_to_text = vault_to_text
+ super(AnsibleJSONEncoder, self).__init__(**kwargs)
+
+ # NOTE: ALWAYS inform AWS/Tower when new items get added as they consume them downstream via a callback
+ def default(self, o):
+ if getattr(o, '__ENCRYPTED__', False):
+ # vault object
+ if self._vault_to_text:
+ value = to_text(o, errors='surrogate_or_strict')
+ else:
+ value = {'__ansible_vault': to_text(o._ciphertext, errors='surrogate_or_strict', nonstring='strict')}
+ elif getattr(o, '__UNSAFE__', False):
+ # unsafe object, this will never be triggered, see ``AnsibleJSONEncoder.iterencode``
+ value = {'__ansible_unsafe': to_text(o, errors='surrogate_or_strict', nonstring='strict')}
+ elif isinstance(o, Mapping):
+ # hostvars and other objects
+ value = dict(o)
+ elif isinstance(o, (datetime.date, datetime.datetime)):
+ # date object
+ value = o.isoformat()
+ else:
+ # use default encoder
+ value = super(AnsibleJSONEncoder, self).default(o)
+ return value
+
+ def iterencode(self, o, **kwargs):
+ """Custom iterencode, primarily design to handle encoding ``AnsibleUnsafe``
+ as the ``AnsibleUnsafe`` subclasses inherit from string types and
+ ``json.JSONEncoder`` does not support custom encoders for string types
+ """
+ if self._preprocess_unsafe:
+ o = _preprocess_unsafe_encode(o)
+
+ return super(AnsibleJSONEncoder, self).iterencode(o, **kwargs)
diff --git a/lib/ansible/module_utils/common/locale.py b/lib/ansible/module_utils/common/locale.py
new file mode 100644
index 0000000..a6068c8
--- /dev/null
+++ b/lib/ansible/module_utils/common/locale.py
@@ -0,0 +1,61 @@
+# Copyright (c), Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+
+
+def get_best_parsable_locale(module, preferences=None, raise_on_locale=False):
+ '''
+    Attempts to return the best possible locale for parsing output in English,
+    which is useful when scraping output from i18n-aware tools. When this raises
+    an exception and the caller wants to continue, it should use the 'C' locale.
+
+ :param module: an AnsibleModule instance
+ :param preferences: A list of preferred locales, in order of preference
+ :param raise_on_locale: boolean that determines if we raise exception or not
+ due to locale CLI issues
+ :returns: The first matched preferred locale or 'C' which is the default
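+
+    :Example (illustrative sketch; assumes ``module`` is an AnsibleModule instance):
+
+    .. code-block:: python
+
+        best = get_best_parsable_locale(module)
+        rc, out, err = module.run_command(['/usr/bin/locale'],
+                                          environ_update={'LC_ALL': best, 'LANG': best})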
+ '''
+
+    found = 'C'  # default POSIX locale; it's ASCII but always there
+ try:
+ locale = module.get_bin_path("locale")
+ if not locale:
+ # not using required=true as that forces fail_json
+ raise RuntimeWarning("Could not find 'locale' tool")
+
+ available = []
+
+ if preferences is None:
+            # new POSIX standard or English, because those are the messages the core team expects
+            # yes, the last 2 are the same but some systems are weird
+ preferences = ['C.utf8', 'C.UTF-8', 'en_US.utf8', 'en_US.UTF-8', 'C', 'POSIX']
+
+ rc, out, err = module.run_command([locale, '-a'])
+
+ if rc == 0:
+ if out:
+ available = out.strip().splitlines()
+ else:
+ raise RuntimeWarning("No output from locale, rc=%s: %s" % (rc, to_native(err)))
+ else:
+ raise RuntimeWarning("Unable to get locale information, rc=%s: %s" % (rc, to_native(err)))
+
+ if available:
+ for pref in preferences:
+ if pref in available:
+ found = pref
+ break
+
+ except RuntimeWarning as e:
+ if raise_on_locale:
+ raise
+ else:
+ module.debug('Failed to get locale information: %s' % to_native(e))
+
+ module.debug('Matched preferred locale to: %s' % found)
+
+ return found
diff --git a/lib/ansible/module_utils/common/network.py b/lib/ansible/module_utils/common/network.py
new file mode 100644
index 0000000..c3874f8
--- /dev/null
+++ b/lib/ansible/module_utils/common/network.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2016 Red Hat Inc
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# General networking tools that may be used by all modules
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from struct import pack
+from socket import inet_ntoa
+
+from ansible.module_utils.six.moves import zip
+
+
+VALID_MASKS = [2**8 - 2**i for i in range(0, 9)]
+
+
+def is_netmask(val):
+ parts = str(val).split('.')
+ if not len(parts) == 4:
+ return False
+ for part in parts:
+ try:
+ if int(part) not in VALID_MASKS:
+ raise ValueError
+ except ValueError:
+ return False
+ return True
+
+
+def is_masklen(val):
+ try:
+ return 0 <= int(val) <= 32
+ except ValueError:
+ return False
+
+
+def to_netmask(val):
+ """ converts a masklen to a netmask """
+ if not is_masklen(val):
+ raise ValueError('invalid value for masklen')
+
+ bits = 0
+ for i in range(32 - int(val), 32):
+ bits |= (1 << i)
+
+ return inet_ntoa(pack('>I', bits))
+
+
+def to_masklen(val):
+ """ converts a netmask to a masklen """
+ if not is_netmask(val):
+ raise ValueError('invalid value for netmask: %s' % val)
+
+ bits = list()
+ for x in val.split('.'):
+ octet = bin(int(x)).count('1')
+ bits.append(octet)
+
+ return sum(bits)
+
+
+def to_subnet(addr, mask, dotted_notation=False):
+ """ coverts an addr / mask pair to a subnet in cidr notation """
+ try:
+ if not is_masklen(mask):
+ raise ValueError
+ cidr = int(mask)
+ mask = to_netmask(mask)
+ except ValueError:
+ cidr = to_masklen(mask)
+
+ addr = addr.split('.')
+ mask = mask.split('.')
+
+ network = list()
+ for s_addr, s_mask in zip(addr, mask):
+ network.append(str(int(s_addr) & int(s_mask)))
+
+ if dotted_notation:
+ return '%s %s' % ('.'.join(network), to_netmask(cidr))
+ return '%s/%s' % ('.'.join(network), cidr)
+
+
+def to_ipv6_subnet(addr):
+ """ IPv6 addresses are eight groupings. The first four groupings (64 bits) comprise the subnet address. """
+
+ # https://tools.ietf.org/rfc/rfc2374.txt
+
+ # Split by :: to identify omitted zeros
+ ipv6_prefix = addr.split('::')[0]
+
+ # Get the first four groups, or as many as are found + ::
+ found_groups = []
+ for group in ipv6_prefix.split(':'):
+ found_groups.append(group)
+ if len(found_groups) == 4:
+ break
+ if len(found_groups) < 4:
+ found_groups.append('::')
+
+ # Concatenate network address parts
+ network_addr = ''
+ for group in found_groups:
+ if group != '::':
+ network_addr += str(group)
+ network_addr += str(':')
+
+ # Ensure network address ends with ::
+ if not network_addr.endswith('::'):
+ network_addr += str(':')
+ return network_addr
+
+
+def to_ipv6_network(addr):
+ """ IPv6 addresses are eight groupings. The first three groupings (48 bits) comprise the network address. """
+
+ # Split by :: to identify omitted zeros
+ ipv6_prefix = addr.split('::')[0]
+
+ # Get the first three groups, or as many as are found + ::
+ found_groups = []
+ for group in ipv6_prefix.split(':'):
+ found_groups.append(group)
+ if len(found_groups) == 3:
+ break
+ if len(found_groups) < 3:
+ found_groups.append('::')
+
+ # Concatenate network address parts
+ network_addr = ''
+ for group in found_groups:
+ if group != '::':
+ network_addr += str(group)
+ network_addr += str(':')
+
+ # Ensure network address ends with ::
+ if not network_addr.endswith('::'):
+ network_addr += str(':')
+ return network_addr
+
+
+def to_bits(val):
+ """ converts a netmask to bits """
+ bits = ''
+ for octet in val.split('.'):
+ bits += bin(int(octet))[2:].zfill(8)
+ return bits
+
+
+def is_mac(mac_address):
+ """
+ Validate MAC address for given string
+ Args:
+ mac_address: string to validate as MAC address
+
+ Returns: (Boolean) True if string is valid MAC address, otherwise False
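+
+    Example (illustrative)::
+
+        is_mac('52:54:00:12:34:56')  # True
+        is_mac('not-a-mac')          # False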
+ """
+ mac_addr_regex = re.compile('[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$')
+ return bool(mac_addr_regex.match(mac_address.lower()))
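+
+
+# is_mac() accepts colon- or dash-separated pairs, upper- or lowercase
+# (illustrative examples):
+#
+#     >>> is_mac('AA:bb:cc:dd:ee:ff')
+#     True
+#     >>> is_mac('aa-bb-cc-dd-ee-ff')
+#     True
+#     >>> is_mac('aabb.ccdd.eeff')
+#     False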
diff --git a/lib/ansible/module_utils/common/parameters.py b/lib/ansible/module_utils/common/parameters.py
new file mode 100644
index 0000000..059ca0a
--- /dev/null
+++ b/lib/ansible/module_utils/common/parameters.py
@@ -0,0 +1,940 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import datetime
+import os
+
+from collections import deque
+from itertools import chain
+
+from ansible.module_utils.common.collections import is_iterable
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.common.text.formatters import lenient_lowercase
+from ansible.module_utils.common.warnings import warn
+from ansible.module_utils.errors import (
+ AliasError,
+ AnsibleFallbackNotFound,
+ AnsibleValidationErrorMultiple,
+ ArgumentTypeError,
+ ArgumentValueError,
+ ElementError,
+ MutuallyExclusiveError,
+ NoLogError,
+ RequiredByError,
+ RequiredError,
+ RequiredIfError,
+ RequiredOneOfError,
+ RequiredTogetherError,
+ SubParameterTypeError,
+)
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
+
+from ansible.module_utils.common._collections_compat import (
+ KeysView,
+ Set,
+ Sequence,
+ Mapping,
+ MutableMapping,
+ MutableSet,
+ MutableSequence,
+)
+
+from ansible.module_utils.six import (
+ binary_type,
+ integer_types,
+ string_types,
+ text_type,
+ PY2,
+ PY3,
+)
+
+from ansible.module_utils.common.validation import (
+ check_mutually_exclusive,
+ check_required_arguments,
+ check_required_together,
+ check_required_one_of,
+ check_required_if,
+ check_required_by,
+ check_type_bits,
+ check_type_bool,
+ check_type_bytes,
+ check_type_dict,
+ check_type_float,
+ check_type_int,
+ check_type_jsonarg,
+ check_type_list,
+ check_type_path,
+ check_type_raw,
+ check_type_str,
+)
+
+# Python2 & 3 way to get NoneType
+NoneType = type(None)
+
+_ADDITIONAL_CHECKS = (
+ {'func': check_required_together, 'attr': 'required_together', 'err': RequiredTogetherError},
+ {'func': check_required_one_of, 'attr': 'required_one_of', 'err': RequiredOneOfError},
+ {'func': check_required_if, 'attr': 'required_if', 'err': RequiredIfError},
+ {'func': check_required_by, 'attr': 'required_by', 'err': RequiredByError},
+)
+
+# if adding boolean attribute, also add to PASS_BOOL
+# some of this dupes defaults from controller config
+PASS_VARS = {
+ 'check_mode': ('check_mode', False),
+ 'debug': ('_debug', False),
+ 'diff': ('_diff', False),
+ 'keep_remote_files': ('_keep_remote_files', False),
+ 'module_name': ('_name', None),
+ 'no_log': ('no_log', False),
+ 'remote_tmp': ('_remote_tmp', None),
+ 'selinux_special_fs': ('_selinux_special_fs', ['fuse', 'nfs', 'vboxsf', 'ramfs', '9p', 'vfat']),
+ 'shell_executable': ('_shell', '/bin/sh'),
+ 'socket': ('_socket_path', None),
+ 'string_conversion_action': ('_string_conversion_action', 'warn'),
+ 'syslog_facility': ('_syslog_facility', 'INFO'),
+ 'tmpdir': ('_tmpdir', None),
+ 'verbosity': ('_verbosity', 0),
+ 'version': ('ansible_version', '0.0'),
+}
+
+PASS_BOOLS = ('check_mode', 'debug', 'diff', 'keep_remote_files', 'no_log')
+
+DEFAULT_TYPE_VALIDATORS = {
+ 'str': check_type_str,
+ 'list': check_type_list,
+ 'dict': check_type_dict,
+ 'bool': check_type_bool,
+ 'int': check_type_int,
+ 'float': check_type_float,
+ 'path': check_type_path,
+ 'raw': check_type_raw,
+ 'jsonarg': check_type_jsonarg,
+ 'json': check_type_jsonarg,
+ 'bytes': check_type_bytes,
+ 'bits': check_type_bits,
+}
+
+
+def _get_type_validator(wanted):
+ """Returns the callable used to validate a wanted type and the type name.
+
+ :arg wanted: String or callable. If a string, get the corresponding
+ validation function from DEFAULT_TYPE_VALIDATORS. If a callable,
+ return it as the type checker along with its name.
+
+ :returns: Tuple of callable function or None, and a string that is the name
+ of the wanted type.
+ """
+
+ # Use one of our builtin validators.
+ if not callable(wanted):
+ if wanted is None:
+ # Default type for parameters
+ wanted = 'str'
+
+ type_checker = DEFAULT_TYPE_VALIDATORS.get(wanted)
+
+ # Use the custom callable for validation.
+ else:
+ type_checker = wanted
+ wanted = getattr(wanted, '__name__', to_native(type(wanted)))
+
+ return type_checker, wanted
+
+
+def _get_legal_inputs(argument_spec, parameters, aliases=None):
+ if aliases is None:
+ aliases = _handle_aliases(argument_spec, parameters)
+
+ return list(aliases.keys()) + list(argument_spec.keys())
+
+
+def _get_unsupported_parameters(argument_spec, parameters, legal_inputs=None, options_context=None, store_supported=None):
+ """Check keys in parameters against those provided in legal_inputs
+ to ensure they contain legal values. If legal_inputs are not supplied,
+ they will be generated using the argument_spec.
+
+ :arg argument_spec: Dictionary of parameters, their type, and valid values.
+ :arg parameters: Dictionary of parameters.
+ :arg legal_inputs: List of valid parameter and alias names. When supplied,
+ this overrides the set of names derived from the argument_spec.
+ :arg options_context: List of parent keys for tracking the context of where
+ a parameter is defined.
+ :kwarg store_supported: Optional dict that is updated with the supported
+ parameters and aliases when an unsupported parameter is encountered.
+
+ :returns: Set of unsupported parameters. Empty set if no unsupported parameters
+ are found.
+ """
+
+ if legal_inputs is None:
+ legal_inputs = _get_legal_inputs(argument_spec, parameters)
+
+ unsupported_parameters = set()
+ for k in parameters.keys():
+ if k not in legal_inputs:
+ context = k
+ if options_context:
+ context = tuple(options_context + [k])
+
+ unsupported_parameters.add(context)
+
+ if store_supported is not None:
+ supported_aliases = _handle_aliases(argument_spec, parameters)
+ supported_params = []
+ for option in legal_inputs:
+ if option in supported_aliases:
+ continue
+ supported_params.append(option)
+
+ store_supported.update({context: (supported_params, supported_aliases)})
+
+ return unsupported_parameters
+
+
+def _handle_aliases(argument_spec, parameters, alias_warnings=None, alias_deprecations=None):
+ """Process aliases from an argument_spec including warnings and deprecations.
+
+ Modify ``parameters`` by adding a new key for each alias with the supplied
+ value from ``parameters``.
+
+ If a list is provided to the alias_warnings parameter, it will be filled with tuples
+ (option, alias) in every case where both an option and its alias are specified.
+
+ If a list is provided to alias_deprecations, it will be populated with dictionaries,
+ each containing deprecation information for each alias found in argument_spec.
+
+ :param argument_spec: Dictionary of parameters, their type, and valid values.
+ :type argument_spec: dict
+
+ :param parameters: Dictionary of parameters.
+ :type parameters: dict
+
+ :param alias_warnings:
+ :type alias_warnings: list
+
+ :param alias_deprecations:
+ :type alias_deprecations: list
+ """
+
+ aliases_results = {} # alias:canon
+
+ for (k, v) in argument_spec.items():
+ aliases = v.get('aliases', None)
+ default = v.get('default', None)
+ required = v.get('required', False)
+
+ if alias_deprecations is not None:
+ for alias in argument_spec[k].get('deprecated_aliases', []):
+ if alias.get('name') in parameters:
+ alias_deprecations.append(alias)
+
+ if default is not None and required:
+ # not alias specific but this is a good place to check this
+ raise ValueError("internal error: required and default are mutually exclusive for %s" % k)
+
+ if aliases is None:
+ continue
+
+ if not is_iterable(aliases) or isinstance(aliases, (binary_type, text_type)):
+ raise TypeError('internal error: aliases must be a list or tuple')
+
+ for alias in aliases:
+ aliases_results[alias] = k
+ if alias in parameters:
+ if k in parameters and alias_warnings is not None:
+ alias_warnings.append((k, alias))
+ parameters[k] = parameters[alias]
+
+ return aliases_results
+
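+# Sketch of the alias resolution above (illustrative; the spec and
+# parameters here are hypothetical):
+#
+#     >>> spec = {'name': {'type': 'str', 'aliases': ['pkg']}}
+#     >>> params = {'pkg': 'acl'}
+#     >>> _handle_aliases(spec, params)
+#     {'pkg': 'name'}
+#     >>> params
+#     {'pkg': 'acl', 'name': 'acl'}
+
+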
+
+def _list_deprecations(argument_spec, parameters, prefix=''):
+ """Return a list of deprecations
+
+ :arg argument_spec: An argument spec dictionary
+ :arg parameters: Dictionary of parameters
+
+ :returns: List of dictionaries containing a message and version in which
+ the deprecated parameter will be removed, or an empty list.
+
+ :Example return:
+
+ .. code-block:: python
+
+ [
+ {
+ 'msg': "Param 'deptest' is deprecated. See the module docs for more information",
+ 'version': '2.9'
+ }
+ ]
+ """
+
+ deprecations = []
+ for arg_name, arg_opts in argument_spec.items():
+ if arg_name in parameters:
+ if prefix:
+ sub_prefix = '%s["%s"]' % (prefix, arg_name)
+ else:
+ sub_prefix = arg_name
+ if arg_opts.get('removed_at_date') is not None:
+ deprecations.append({
+ 'msg': "Param '%s' is deprecated. See the module docs for more information" % sub_prefix,
+ 'date': arg_opts.get('removed_at_date'),
+ 'collection_name': arg_opts.get('removed_from_collection'),
+ })
+ elif arg_opts.get('removed_in_version') is not None:
+ deprecations.append({
+ 'msg': "Param '%s' is deprecated. See the module docs for more information" % sub_prefix,
+ 'version': arg_opts.get('removed_in_version'),
+ 'collection_name': arg_opts.get('removed_from_collection'),
+ })
+ # Check sub-argument spec
+ sub_argument_spec = arg_opts.get('options')
+ if sub_argument_spec is not None:
+ sub_arguments = parameters[arg_name]
+ if isinstance(sub_arguments, Mapping):
+ sub_arguments = [sub_arguments]
+ if isinstance(sub_arguments, list):
+ for sub_params in sub_arguments:
+ if isinstance(sub_params, Mapping):
+ deprecations.extend(_list_deprecations(sub_argument_spec, sub_params, prefix=sub_prefix))
+
+ return deprecations
+
+
+def _list_no_log_values(argument_spec, params):
+ """Return set of no log values
+
+ :arg argument_spec: An argument spec dictionary
+ :arg params: Dictionary of all parameters
+
+ :returns: :class:`set` of strings that should be hidden from output.
+ """
+
+ no_log_values = set()
+ for arg_name, arg_opts in argument_spec.items():
+ if arg_opts.get('no_log', False):
+ # Find the value for the no_log'd param
+ no_log_object = params.get(arg_name, None)
+
+ if no_log_object:
+ try:
+ no_log_values.update(_return_datastructure_name(no_log_object))
+ except TypeError as e:
+ raise TypeError('Failed to convert "%s": %s' % (arg_name, to_native(e)))
+
+ # Get no_log values from suboptions
+ sub_argument_spec = arg_opts.get('options')
+ if sub_argument_spec is not None:
+ wanted_type = arg_opts.get('type')
+ sub_parameters = params.get(arg_name)
+
+ if sub_parameters is not None:
+ if wanted_type == 'dict' or (wanted_type == 'list' and arg_opts.get('elements', '') == 'dict'):
+ # Sub parameters can be a dict or list of dicts. Ensure parameters are always a list.
+ if not isinstance(sub_parameters, list):
+ sub_parameters = [sub_parameters]
+
+ for sub_param in sub_parameters:
+ # Validate dict fields in case they came in as strings
+
+ if isinstance(sub_param, string_types):
+ sub_param = check_type_dict(sub_param)
+
+ if not isinstance(sub_param, Mapping):
+ raise TypeError("Value '{1}' in the sub parameter field '{0}' must by a {2}, "
+ "not '{1.__class__.__name__}'".format(arg_name, sub_param, wanted_type))
+
+ no_log_values.update(_list_no_log_values(sub_argument_spec, sub_param))
+
+ return no_log_values
+
+
+def _return_datastructure_name(obj):
+ """ Return native stringified values from datastructures.
+
+ For use with removing sensitive values pre-jsonification."""
+ if isinstance(obj, (text_type, binary_type)):
+ if obj:
+ yield to_native(obj, errors='surrogate_or_strict')
+ return
+ elif isinstance(obj, Mapping):
+ for element in obj.items():
+ for subelement in _return_datastructure_name(element[1]):
+ yield subelement
+ elif is_iterable(obj):
+ for element in obj:
+ for subelement in _return_datastructure_name(element):
+ yield subelement
+ elif obj is None or isinstance(obj, bool):
+ # This must come before int because bools are also ints
+ return
+ elif isinstance(obj, tuple(list(integer_types) + [float])):
+ yield to_native(obj, nonstring='simplerepr')
+ else:
+ raise TypeError('Unknown parameter type: %s' % (type(obj)))
+
+
+def _remove_values_conditions(value, no_log_strings, deferred_removals):
+ """
+ Helper function for :meth:`remove_values`.
+
+ :arg value: The value to check for strings that need to be stripped
+ :arg no_log_strings: set of strings which must be stripped out of any values
+ :arg deferred_removals: List which holds information about nested
+ containers that have to be iterated for removals. It is passed into
+ this function so that more entries can be added to it if value is
+ a container type. The format of each entry is a 2-tuple where the first
+ element is the ``value`` parameter and the second value is a new
+ container to copy the elements of ``value`` into once iterated.
+
+ :returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
+
+ 1. :class:`~datetime.datetime` objects which are changed into a string representation.
+ 2. objects which are in ``no_log_strings`` are replaced with a placeholder
+ so that no sensitive data is leaked.
+
+ If ``value`` is a container type, returns a new empty container.
+
+ ``deferred_removals`` is added to as a side-effect of this function.
+
+ .. warning:: It is up to the caller to make sure the order in which value
+ is passed in is correct. For instance, higher level containers need
+ to be passed in before lower level containers. For example, given
+ ``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
+ dictionary for ``level1``, then the dict for ``level2``, and finally
+ the list for ``level3``.
+ """
+ if isinstance(value, (text_type, binary_type)):
+ # Need native str type
+ native_str_value = value
+ if isinstance(value, text_type):
+ value_is_text = True
+ if PY2:
+ native_str_value = to_bytes(value, errors='surrogate_or_strict')
+ elif isinstance(value, binary_type):
+ value_is_text = False
+ if PY3:
+ native_str_value = to_text(value, errors='surrogate_or_strict')
+
+ if native_str_value in no_log_strings:
+ return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+ for omit_me in no_log_strings:
+ native_str_value = native_str_value.replace(omit_me, '*' * 8)
+
+ if value_is_text and isinstance(native_str_value, binary_type):
+ value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
+ elif not value_is_text and isinstance(native_str_value, text_type):
+ value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
+ else:
+ value = native_str_value
+
+ elif isinstance(value, Sequence):
+ if isinstance(value, MutableSequence):
+ new_value = type(value)()
+ else:
+ new_value = [] # Need a mutable value
+ deferred_removals.append((value, new_value))
+ value = new_value
+
+ elif isinstance(value, Set):
+ if isinstance(value, MutableSet):
+ new_value = type(value)()
+ else:
+ new_value = set() # Need a mutable value
+ deferred_removals.append((value, new_value))
+ value = new_value
+
+ elif isinstance(value, Mapping):
+ if isinstance(value, MutableMapping):
+ new_value = type(value)()
+ else:
+ new_value = {} # Need a mutable value
+ deferred_removals.append((value, new_value))
+ value = new_value
+
+ elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
+ stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
+ if stringy_value in no_log_strings:
+ return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+ for omit_me in no_log_strings:
+ if omit_me in stringy_value:
+ return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+
+ elif isinstance(value, (datetime.datetime, datetime.date)):
+ value = value.isoformat()
+ else:
+ raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
+
+ return value
+
+
+def _set_defaults(argument_spec, parameters, set_default=True):
+ """Set default values for parameters when no value is supplied.
+
+ Modifies parameters directly.
+
+ :arg argument_spec: Argument spec
+ :type argument_spec: dict
+
+ :arg parameters: Parameters to evaluate
+ :type parameters: dict
+
+ :kwarg set_default: Whether or not to set the default values
+ :type set_default: bool
+
+ :returns: Set of strings that should not be logged.
+ :rtype: set
+ """
+
+ no_log_values = set()
+ for param, value in argument_spec.items():
+
+ # TODO: Change the default value from None to Sentinel to differentiate between
+ # user supplied None and a default value set by this function.
+ default = value.get('default', None)
+
+ # This prevents setting defaults on required items on the 1st run;
+ # otherwise, parameters without a default would be set to None on the 2nd.
+ if param not in parameters and (default is not None or set_default):
+ # Make sure any default value for no_log fields are masked.
+ if value.get('no_log', False) and default:
+ no_log_values.add(default)
+
+ parameters[param] = default
+
+ return no_log_values
+
+
+def _sanitize_keys_conditions(value, no_log_strings, ignore_keys, deferred_removals):
+ """ Helper method to :func:`sanitize_keys` to build ``deferred_removals`` and avoid deep recursion. """
+ if isinstance(value, (text_type, binary_type)):
+ return value
+
+ if isinstance(value, Sequence):
+ if isinstance(value, MutableSequence):
+ new_value = type(value)()
+ else:
+ new_value = [] # Need a mutable value
+ deferred_removals.append((value, new_value))
+ return new_value
+
+ if isinstance(value, Set):
+ if isinstance(value, MutableSet):
+ new_value = type(value)()
+ else:
+ new_value = set() # Need a mutable value
+ deferred_removals.append((value, new_value))
+ return new_value
+
+ if isinstance(value, Mapping):
+ if isinstance(value, MutableMapping):
+ new_value = type(value)()
+ else:
+ new_value = {} # Need a mutable value
+ deferred_removals.append((value, new_value))
+ return new_value
+
+ if isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
+ return value
+
+ if isinstance(value, (datetime.datetime, datetime.date)):
+ return value
+
+ raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
+
+
+def _validate_elements(wanted_type, parameter, values, options_context=None, errors=None):
+
+ if errors is None:
+ errors = AnsibleValidationErrorMultiple()
+
+ type_checker, wanted_element_type = _get_type_validator(wanted_type)
+ validated_parameters = []
+ # Get param name for strings so we can later display this value in a useful error message if needed
+ # Only pass 'kwargs' to our checkers and ignore custom callable checkers
+ kwargs = {}
+ if wanted_element_type == 'str' and isinstance(wanted_type, string_types):
+ if isinstance(parameter, string_types):
+ kwargs['param'] = parameter
+ elif isinstance(parameter, dict):
+ kwargs['param'] = list(parameter.keys())[0]
+
+ for value in values:
+ try:
+ validated_parameters.append(type_checker(value, **kwargs))
+ except (TypeError, ValueError) as e:
+ msg = "Elements value for option '%s'" % parameter
+ if options_context:
+ msg += " found in '%s'" % " -> ".join(options_context)
+ msg += " is of type %s and we were unable to convert to %s: %s" % (type(value), wanted_element_type, to_native(e))
+ errors.append(ElementError(msg))
+ return validated_parameters
+
+
+def _validate_argument_types(argument_spec, parameters, prefix='', options_context=None, errors=None):
+ """Validate that parameter types match the type in the argument spec.
+
+ Determine the appropriate type checker function and run each
+ parameter value through that function. All error messages from type checker
+ functions are returned. If any parameter fails to validate, it will not
+ be in the returned parameters.
+
+ :arg argument_spec: Argument spec
+ :type argument_spec: dict
+
+ :arg parameters: Parameters
+ :type parameters: dict
+
+ :kwarg prefix: Name of the parent key that contains the spec. Used in the error message
+ :type prefix: str
+
+ :kwarg options_context: List of parent keys for tracking the context of where a parameter is defined.
+ :type options_context: list
+
+ :returns: Two item tuple containing validated and coerced parameters
+ and a list of any errors that were encountered.
+ :rtype: tuple
+
+ """
+
+ if errors is None:
+ errors = AnsibleValidationErrorMultiple()
+
+ for param, spec in argument_spec.items():
+ if param not in parameters:
+ continue
+
+ value = parameters[param]
+ if value is None:
+ continue
+
+ wanted_type = spec.get('type')
+ type_checker, wanted_name = _get_type_validator(wanted_type)
+ # Get param name for strings so we can later display this value in a useful error message if needed
+ # Only pass 'kwargs' to our checkers and ignore custom callable checkers
+ kwargs = {}
+ if wanted_name == 'str' and isinstance(wanted_type, string_types):
+ kwargs['param'] = list(parameters.keys())[0]
+
+ # Get the name of the parent key if this is a nested option
+ if prefix:
+ kwargs['prefix'] = prefix
+
+ try:
+ parameters[param] = type_checker(value, **kwargs)
+ elements_wanted_type = spec.get('elements', None)
+ if elements_wanted_type:
+ elements = parameters[param]
+ if wanted_type != 'list' or not isinstance(elements, list):
+ msg = "Invalid type %s for option '%s'" % (wanted_name, elements)
+ if options_context:
+ msg += " found in '%s'." % " -> ".join(options_context)
+ msg += ", elements value check is supported only with 'list' type"
+ errors.append(ArgumentTypeError(msg))
+ parameters[param] = _validate_elements(elements_wanted_type, param, elements, options_context, errors)
+
+ except (TypeError, ValueError) as e:
+ msg = "argument '%s' is of type %s" % (param, type(value))
+ if options_context:
+ msg += " found in '%s'." % " -> ".join(options_context)
+ msg += " and we were unable to convert to %s: %s" % (wanted_name, to_native(e))
+ errors.append(ArgumentTypeError(msg))
+
+
+def _validate_argument_values(argument_spec, parameters, options_context=None, errors=None):
+ """Ensure all arguments have the requested values, and there are no stray arguments"""
+
+ if errors is None:
+ errors = AnsibleValidationErrorMultiple()
+
+ for param, spec in argument_spec.items():
+ choices = spec.get('choices')
+ if choices is None:
+ continue
+
+ if isinstance(choices, (frozenset, KeysView, Sequence)) and not isinstance(choices, (binary_type, text_type)):
+ if param in parameters:
+ # Allow one or more when type='list' param with choices
+ if isinstance(parameters[param], list):
+ diff_list = [item for item in parameters[param] if item not in choices]
+ if diff_list:
+ choices_str = ", ".join([to_native(c) for c in choices])
+ diff_str = ", ".join(diff_list)
+ msg = "value of %s must be one or more of: %s. Got no match for: %s" % (param, choices_str, diff_str)
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ errors.append(ArgumentValueError(msg))
+ elif parameters[param] not in choices:
+ # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
+ # the value. If we can't figure this out, module author is responsible.
+ if parameters[param] == 'False':
+ overlap = BOOLEANS_FALSE.intersection(choices)
+ if len(overlap) == 1:
+ # Extract from a set
+ (parameters[param],) = overlap
+
+ if parameters[param] == 'True':
+ overlap = BOOLEANS_TRUE.intersection(choices)
+ if len(overlap) == 1:
+ (parameters[param],) = overlap
+
+ if parameters[param] not in choices:
+ choices_str = ", ".join([to_native(c) for c in choices])
+ msg = "value of %s must be one of: %s, got: %s" % (param, choices_str, parameters[param])
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ errors.append(ArgumentValueError(msg))
+ else:
+ msg = "internal error: choices for argument %s are not iterable: %s" % (param, choices)
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ errors.append(ArgumentTypeError(msg))
+
+
+def _validate_sub_spec(
+ argument_spec,
+ parameters,
+ prefix="",
+ options_context=None,
+ errors=None,
+ no_log_values=None,
+ unsupported_parameters=None,
+ supported_parameters=None,
+ alias_deprecations=None,
+):
+ """Validate sub argument spec.
+
+ This function is recursive.
+ """
+
+ if options_context is None:
+ options_context = []
+
+ if errors is None:
+ errors = AnsibleValidationErrorMultiple()
+
+ if no_log_values is None:
+ no_log_values = set()
+
+ if unsupported_parameters is None:
+ unsupported_parameters = set()
+ if supported_parameters is None:
+ supported_parameters = dict()
+
+ for param, value in argument_spec.items():
+ wanted = value.get('type')
+ if wanted == 'dict' or (wanted == 'list' and value.get('elements', '') == 'dict'):
+ sub_spec = value.get('options')
+ if value.get('apply_defaults', False):
+ if sub_spec is not None:
+ if parameters.get(param) is None:
+ parameters[param] = {}
+ else:
+ continue
+ elif sub_spec is None or param not in parameters or parameters[param] is None:
+ continue
+
+ # Keep track of context for warning messages
+ options_context.append(param)
+
+ # Make sure we can iterate over the elements
+ if not isinstance(parameters[param], Sequence) or isinstance(parameters[param], string_types):
+ elements = [parameters[param]]
+ else:
+ elements = parameters[param]
+
+ for idx, sub_parameters in enumerate(elements):
+ no_log_values.update(set_fallbacks(sub_spec, sub_parameters))
+
+ if not isinstance(sub_parameters, dict):
+ errors.append(SubParameterTypeError("value of '%s' must be of type dict or list of dicts" % param))
+ continue
+
+ # Set prefix for warning messages
+ new_prefix = prefix + param
+ if wanted == 'list':
+ new_prefix += '[%d]' % idx
+ new_prefix += '.'
+
+ alias_warnings = []
+ alias_deprecations_sub = []
+ try:
+ options_aliases = _handle_aliases(sub_spec, sub_parameters, alias_warnings, alias_deprecations_sub)
+ except (TypeError, ValueError) as e:
+ options_aliases = {}
+ errors.append(AliasError(to_native(e)))
+
+ for option, alias in alias_warnings:
+ warn('Both option %s%s and its alias %s%s are set.' % (new_prefix, option, new_prefix, alias))
+
+ if alias_deprecations is not None:
+ for deprecation in alias_deprecations_sub:
+ alias_deprecations.append({
+ 'name': '%s%s' % (new_prefix, deprecation['name']),
+ 'version': deprecation.get('version'),
+ 'date': deprecation.get('date'),
+ 'collection_name': deprecation.get('collection_name'),
+ })
+
+ try:
+ no_log_values.update(_list_no_log_values(sub_spec, sub_parameters))
+ except TypeError as te:
+ errors.append(NoLogError(to_native(te)))
+
+ legal_inputs = _get_legal_inputs(sub_spec, sub_parameters, options_aliases)
+ unsupported_parameters.update(
+ _get_unsupported_parameters(
+ sub_spec,
+ sub_parameters,
+ legal_inputs,
+ options_context,
+ store_supported=supported_parameters,
+ )
+ )
+
+ try:
+ check_mutually_exclusive(value.get('mutually_exclusive'), sub_parameters, options_context)
+ except TypeError as e:
+ errors.append(MutuallyExclusiveError(to_native(e)))
+
+ no_log_values.update(_set_defaults(sub_spec, sub_parameters, False))
+
+ try:
+ check_required_arguments(sub_spec, sub_parameters, options_context)
+ except TypeError as e:
+ errors.append(RequiredError(to_native(e)))
+
+ _validate_argument_types(sub_spec, sub_parameters, new_prefix, options_context, errors=errors)
+ _validate_argument_values(sub_spec, sub_parameters, options_context, errors=errors)
+
+ for check in _ADDITIONAL_CHECKS:
+ try:
+ check['func'](value.get(check['attr']), sub_parameters, options_context)
+ except TypeError as e:
+ errors.append(check['err'](to_native(e)))
+
+ no_log_values.update(_set_defaults(sub_spec, sub_parameters))
+
+ # Handle nested specs
+ _validate_sub_spec(
+ sub_spec, sub_parameters, new_prefix, options_context, errors, no_log_values,
+ unsupported_parameters, supported_parameters, alias_deprecations)
+
+ options_context.pop()
+
+
+def env_fallback(*args, **kwargs):
+ """Load value from environment variable"""
+
+ for arg in args:
+ if arg in os.environ:
+ return os.environ[arg]
+ raise AnsibleFallbackNotFound
+
+
+def set_fallbacks(argument_spec, parameters):
+ no_log_values = set()
+ for param, value in argument_spec.items():
+ fallback = value.get('fallback', (None,))
+ fallback_strategy = fallback[0]
+ fallback_args = []
+ fallback_kwargs = {}
+ if param not in parameters and fallback_strategy is not None:
+ for item in fallback[1:]:
+ if isinstance(item, dict):
+ fallback_kwargs = item
+ else:
+ fallback_args = item
+ try:
+ fallback_value = fallback_strategy(*fallback_args, **fallback_kwargs)
+ except AnsibleFallbackNotFound:
+ continue
+ else:
+ if value.get('no_log', False) and fallback_value:
+ no_log_values.add(fallback_value)
+ parameters[param] = fallback_value
+
+ return no_log_values
+
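+# Typical fallback wiring in an argument spec (sketch; the option and
+# environment variable names are hypothetical):
+#
+#     >>> spec = {'api_token': {'type': 'str', 'no_log': True,
+#     ...                       'fallback': (env_fallback, ['ACME_API_TOKEN'])}}
+#     >>> params = {}
+#     >>> os.environ['ACME_API_TOKEN'] = 's3cret'
+#     >>> set_fallbacks(spec, params)   # returns values to keep out of logs
+#     {'s3cret'}
+#     >>> params
+#     {'api_token': 's3cret'}
+
+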
+
+def sanitize_keys(obj, no_log_strings, ignore_keys=frozenset()):
+ """Sanitize the keys in a container object by removing ``no_log`` values from key names.
+
+ This is a companion function to the :func:`remove_values` function. Similar to that function,
+ we make use of ``deferred_removals`` to avoid hitting maximum recursion depth in cases of
+ large data structures.
+
+ :arg obj: The container object to sanitize. Non-container objects are returned unmodified.
+ :arg no_log_strings: A set of string values we do not want logged.
+ :kwarg ignore_keys: A set of string values of keys to not sanitize.
+
+ :returns: An object with sanitized keys.
+ """
+
+ deferred_removals = deque()
+
+ no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
+ new_value = _sanitize_keys_conditions(obj, no_log_strings, ignore_keys, deferred_removals)
+
+ while deferred_removals:
+ old_data, new_data = deferred_removals.popleft()
+
+ if isinstance(new_data, Mapping):
+ for old_key, old_elem in old_data.items():
+ if old_key in ignore_keys or old_key.startswith('_ansible'):
+ new_data[old_key] = _sanitize_keys_conditions(old_elem, no_log_strings, ignore_keys, deferred_removals)
+ else:
+ # Sanitize the old key. We take advantage of the sanitizing code in
+ # _remove_values_conditions() rather than recreating it here.
+ new_key = _remove_values_conditions(old_key, no_log_strings, None)
+ new_data[new_key] = _sanitize_keys_conditions(old_elem, no_log_strings, ignore_keys, deferred_removals)
+ else:
+ for elem in old_data:
+ new_elem = _sanitize_keys_conditions(elem, no_log_strings, ignore_keys, deferred_removals)
+ if isinstance(new_data, MutableSequence):
+ new_data.append(new_elem)
+ elif isinstance(new_data, MutableSet):
+ new_data.add(new_elem)
+ else:
+ raise TypeError('Unknown container type encountered when removing private values from keys')
+
+ return new_value
+
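+# Key sanitization example (illustrative): matching key names are masked
+# while values are left untouched:
+#
+#     >>> sanitize_keys({'abc123_url': 'https://example.com'}, {'abc123'})
+#     {'********_url': 'https://example.com'}
+
+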
+
+def remove_values(value, no_log_strings):
+ """Remove strings in ``no_log_strings`` from value.
+
+ If value is a container type, strings are removed from its members recursively.
+
+ Use of ``deferred_removals`` exists, rather than a pure recursive solution,
+ because of the potential to hit the maximum recursion depth when dealing with
+ large amounts of data (see `issue #24560 <https://github.com/ansible/ansible/issues/24560>`_).
+ """
+
+ deferred_removals = deque()
+
+ no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
+ new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
+
+ while deferred_removals:
+ old_data, new_data = deferred_removals.popleft()
+ if isinstance(new_data, Mapping):
+ for old_key, old_elem in old_data.items():
+ new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
+ new_data[old_key] = new_elem
+ else:
+ for elem in old_data:
+ new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
+ if isinstance(new_data, MutableSequence):
+ new_data.append(new_elem)
+ elif isinstance(new_data, MutableSet):
+ new_data.add(new_elem)
+ else:
+ raise TypeError('Unknown container type encountered when removing private values from output')
+
+ return new_value
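+
+
+# Example of censoring a nested result structure (illustrative):
+#
+#     >>> remove_values({'msg': 'token=abc123', 'rc': 0}, {'abc123'})
+#     {'msg': 'token=********', 'rc': 0}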
diff --git a/lib/ansible/module_utils/common/process.py b/lib/ansible/module_utils/common/process.py
new file mode 100644
index 0000000..97761a4
--- /dev/null
+++ b/lib/ansible/module_utils/common/process.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.common.file import is_executable
+
+
+def get_bin_path(arg, opt_dirs=None, required=None):
+ '''
+ Find system executable in PATH. Raises ValueError if executable is not found.
+ Optional arguments:
+ - required: [Deprecated] Prior to 2.10, if executable is not found and required is true it raises an Exception.
+ In 2.10 and later, an Exception is always raised. This parameter will be removed in 2.14.
+ - opt_dirs: optional list of directories to search in addition to PATH
+ In addition to PATH and opt_dirs, this function also looks through /sbin, /usr/sbin and /usr/local/sbin. A lot of
+ modules, especially for gathering facts, depend on this behaviour.
+ If found return full path, otherwise raise ValueError.
+ '''
+ opt_dirs = [] if opt_dirs is None else opt_dirs
+
+ sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
+ paths = []
+ for d in opt_dirs:
+ if d is not None and os.path.exists(d):
+ paths.append(d)
+ paths += os.environ.get('PATH', '').split(os.pathsep)
+ bin_path = None
+ # mangle PATH to include /sbin dirs
+ for p in sbin_paths:
+ if p not in paths and os.path.exists(p):
+ paths.append(p)
+ for d in paths:
+ if not d:
+ continue
+ path = os.path.join(d, arg)
+ if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
+ bin_path = path
+ break
+ if bin_path is None:
+ raise ValueError('Failed to find required executable "%s" in paths: %s' % (arg, os.pathsep.join(paths)))
+
+ return bin_path
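+
+
+# Usage sketch (the executable name and resulting path are examples only;
+# the returned path depends on the host):
+#
+#     >>> get_bin_path('sed', opt_dirs=['/opt/local/bin'])
+#     '/usr/bin/sed'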
diff --git a/lib/ansible/module_utils/common/respawn.py b/lib/ansible/module_utils/common/respawn.py
new file mode 100644
index 0000000..3bc526a
--- /dev/null
+++ b/lib/ansible/module_utils/common/respawn.py
@@ -0,0 +1,98 @@
+# Copyright: (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import subprocess
+import sys
+
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+
+
+def has_respawned():
+ return hasattr(sys.modules['__main__'], '_respawned')
+
+
+def respawn_module(interpreter_path):
+ """
+ Respawn the currently-running Ansible Python module under the specified Python interpreter.
+
+ Ansible modules that require libraries that are typically available only under well-known interpreters
+ (eg, ``yum``, ``apt``, ``dnf``) can use bespoke logic to determine the libraries they need are not
+ available, then call `respawn_module` to re-execute the current module under a different interpreter
+ and exit the current process when the new subprocess has completed. The respawned process inherits only
+ stdout/stderr from the current process.
+
+ Only a single respawn is allowed. ``respawn_module`` will fail on nested respawns. Modules are encouraged
+ to call `has_respawned()` to defensively guide behavior before calling ``respawn_module``, and to ensure
+ that the target interpreter exists, as ``respawn_module`` will not fail gracefully.
+
+ :arg interpreter_path: path to a Python interpreter to respawn the current module
+ """
+
+ if has_respawned():
+ raise Exception('module has already been respawned')
+
+ # FUTURE: we need a safe way to log that a respawn has occurred for forensic/debug purposes
+ payload = _create_payload()
+ stdin_read, stdin_write = os.pipe()
+ os.write(stdin_write, to_bytes(payload))
+ os.close(stdin_write)
+ rc = subprocess.call([interpreter_path, '--'], stdin=stdin_read)
+ sys.exit(rc) # pylint: disable=ansible-bad-function
+
+
+def probe_interpreters_for_module(interpreter_paths, module_name):
+ """
+ Probes a supplied list of Python interpreters, returning the first one capable of
+ importing the named module. This is useful when attempting to locate a "system
+ Python" where OS-packaged utility modules are located.
+
+ :arg interpreter_paths: iterable of paths to Python interpreters. The paths will be probed
+ in order, and the first path that exists and can successfully import the named module will
+ be returned (or ``None`` if probing fails for all supplied paths).
+ :arg module_name: fully-qualified Python module name to probe for (eg, ``selinux``)
+ """
+ for interpreter_path in interpreter_paths:
+ if not os.path.exists(interpreter_path):
+ continue
+ try:
+ rc = subprocess.call([interpreter_path, '-c', 'import {0}'.format(module_name)])
+ if rc == 0:
+ return interpreter_path
+ except Exception:
+ continue
+
+ return None
+
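+# Typical respawn pattern inside a module, per the docstrings above (a
+# sketch; HAS_SELINUX and the interpreter list are assumptions):
+#
+#     if not HAS_SELINUX and not has_respawned():
+#         interp = probe_interpreters_for_module(
+#             ['/usr/bin/python3', '/usr/bin/python'], 'selinux')
+#         if interp:
+#             respawn_module(interp)  # does not return on success
+
+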
+
+def _create_payload():
+ from ansible.module_utils import basic
+ smuggled_args = getattr(basic, '_ANSIBLE_ARGS')
+ if not smuggled_args:
+ raise Exception('unable to access ansible.module_utils.basic._ANSIBLE_ARGS (not launched by AnsiballZ?)')
+ module_fqn = sys.modules['__main__']._module_fqn
+ modlib_path = sys.modules['__main__']._modlib_path
+ respawn_code_template = '''
+import runpy
+import sys
+
+module_fqn = '{module_fqn}'
+modlib_path = '{modlib_path}'
+smuggled_args = b"""{smuggled_args}""".strip()
+
+
+if __name__ == '__main__':
+ sys.path.insert(0, modlib_path)
+
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = smuggled_args
+
+ runpy.run_module(module_fqn, init_globals=dict(_respawned=True), run_name='__main__', alter_sys=True)
+ '''
+
+ respawn_code = respawn_code_template.format(module_fqn=module_fqn, modlib_path=modlib_path, smuggled_args=to_native(smuggled_args))
+
+ return respawn_code
diff --git a/lib/ansible/module_utils/common/sys_info.py b/lib/ansible/module_utils/common/sys_info.py
new file mode 100644
index 0000000..206b36c
--- /dev/null
+++ b/lib/ansible/module_utils/common/sys_info.py
@@ -0,0 +1,157 @@
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import platform
+
+from ansible.module_utils import distro
+from ansible.module_utils.common._utils import get_all_subclasses
+
+
+__all__ = ('get_distribution', 'get_distribution_version', 'get_platform_subclass')
+
+
+def get_distribution():
+ '''
+ Return the name of the distribution the module is running on.
+
+ :rtype: NativeString or None
+ :returns: Name of the distribution the module is running on
+
+ This function attempts to determine what distribution the code is running
+ on and return a string representing that value. If the platform is Linux
+ and the distribution cannot be determined, it returns ``OtherLinux``.
+ '''
+ distribution = distro.id().capitalize()
+
+ if platform.system() == 'Linux':
+ if distribution == 'Amzn':
+ distribution = 'Amazon'
+ elif distribution == 'Rhel':
+ distribution = 'Redhat'
+ elif not distribution:
+ distribution = 'OtherLinux'
+
+ return distribution
+
+
+def get_distribution_version():
+ '''
+ Get the version of the distribution the code is running on
+
+ :rtype: NativeString or None
+ :returns: A string representation of the version of the distribution. If it
+ cannot determine the version, it returns an empty string. If this is not run on
+ a Linux machine it returns None.
+ '''
+ version = None
+
+ needs_best_version = frozenset((
+ u'centos',
+ u'debian',
+ ))
+
+ version = distro.version()
+ distro_id = distro.id()
+
+ if version is not None:
+ if distro_id in needs_best_version:
+ version_best = distro.version(best=True)
+
+ # CentOS maintainers believe only the major version is appropriate
+ # but Ansible users desire minor version information, e.g., 7.5.
+ # https://github.com/ansible/ansible/issues/50141#issuecomment-449452781
+ if distro_id == u'centos':
+ version = u'.'.join(version_best.split(u'.')[:2])
+
+ # Debian does not include minor version in /etc/os-release.
+ # Bug report filed upstream requesting this be added to /etc/os-release
+ # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=931197
+ if distro_id == u'debian':
+ version = version_best
+
+ else:
+ version = u''
+
+ return version
+
+
+def get_distribution_codename():
+ '''
+ Return the code name for this Linux Distribution
+
+ :rtype: NativeString or None
+ :returns: A string representation of the distribution's codename or None if not a Linux distro
+ '''
+ codename = None
+ if platform.system() == 'Linux':
+ # Until this gets merged and we update our bundled copy of distro:
+ # https://github.com/nir0s/distro/pull/230
+ # Fixes Fedora 28+ not having a code name and Ubuntu Xenial Xerus needing to be "xenial"
+ os_release_info = distro.os_release_info()
+ codename = os_release_info.get('version_codename')
+
+ if codename is None:
+ codename = os_release_info.get('ubuntu_codename')
+
+ if codename is None and distro.id() == 'ubuntu':
+ lsb_release_info = distro.lsb_release_info()
+ codename = lsb_release_info.get('codename')
+
+ if codename is None:
+ codename = distro.codename()
+ if codename == u'':
+ codename = None
+
+ return codename
+
+
+def get_platform_subclass(cls):
+ '''
+ Finds a subclass implementing desired functionality on the platform the code is running on
+
+ :arg cls: Class to find an appropriate subclass for
+ :returns: A class that implements the functionality on this platform
+
+ Some Ansible modules have different implementations depending on the platform they run on. This
+ function is used to select between the various implementations and choose one. You can look at
+ the implementation of the Ansible :ref:`User module<user_module>` module for an example of how to use this.
+
+ This function replaces ``basic.load_platform_subclass()``. When you port code, you need to
+ change the callers to be explicit about instantiating the class. For instance, code in the
+ Ansible User module changed from:
+
+ .. code-block:: python
+
+ # Old
+ class User:
+ def __new__(cls, *args, **kwargs):
+ return load_platform_subclass(User, args, kwargs)
+
+ # New
+ class User:
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(User)
+ return super(cls, new_cls).__new__(new_cls)
+ '''
+ this_platform = platform.system()
+ distribution = get_distribution()
+
+ subclass = None
+
+ # get the most specific superclass for this platform
+ if distribution is not None:
+ for sc in get_all_subclasses(cls):
+ if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
+ subclass = sc
+ if subclass is None:
+ for sc in get_all_subclasses(cls):
+ if sc.platform == this_platform and sc.distribution is None:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return subclass
diff --git a/lib/ansible/module_utils/common/text/__init__.py b/lib/ansible/module_utils/common/text/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/common/text/__init__.py
diff --git a/lib/ansible/module_utils/common/text/converters.py b/lib/ansible/module_utils/common/text/converters.py
new file mode 100644
index 0000000..5b25df4
--- /dev/null
+++ b/lib/ansible/module_utils/common/text/converters.py
@@ -0,0 +1,322 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import codecs
+import datetime
+import json
+
+from ansible.module_utils.common._collections_compat import Set
+from ansible.module_utils.six import (
+ PY3,
+ binary_type,
+ iteritems,
+ text_type,
+)
+
+try:
+ codecs.lookup_error('surrogateescape')
+ HAS_SURROGATEESCAPE = True
+except LookupError:
+ HAS_SURROGATEESCAPE = False
+
+
+_COMPOSED_ERROR_HANDLERS = frozenset((None, 'surrogate_or_replace',
+ 'surrogate_or_strict',
+ 'surrogate_then_replace'))
+
+
+def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
+ """Make sure that a string is a byte string
+
+ :arg obj: An object to make sure is a byte string. In most cases this
+ will be either a text string or a byte string. However, with
+ ``nonstring='simplerepr'``, this can be used as a traceback-free
+ version of ``str(obj)``.
+ :kwarg encoding: The encoding to use to transform from a text string to
+ a byte string. Defaults to using 'utf-8'.
+ :kwarg errors: The error handler to use if the text string is not
+ encodable using the specified encoding. Any valid `codecs error
+ handler <https://docs.python.org/3/library/codecs.html#codec-base-classes>`_
+ may be specified. There are three additional error strategies
+ specifically aimed at helping people to port code. The first two are:
+
+ :surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
+ handler, otherwise it will use ``strict``
+ :surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
+ handler, otherwise it will use ``replace``.
+
+ Because ``surrogateescape`` was added in Python3 this usually means that
+ Python3 will use ``surrogateescape`` and Python2 will use the fallback
+ error handler. Note that the code checks for ``surrogateescape`` when the
+ module is imported. If you have a backport of ``surrogateescape`` for
+ Python2, be sure to register the error handler prior to importing this
+ module.
+
+ The last error handler is:
+
+ :surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
+ handler. If encoding with ``surrogateescape`` would traceback,
+ surrogates are first replaced with a replacement characters
+ and then the string is encoded using ``replace`` (which replaces
+ the rest of the nonencodable bytes). If ``surrogateescape`` is
+ not present it will simply use ``replace``. (Added in Ansible 2.3)
+ This strategy is designed to never traceback when it attempts
+ to encode a string.
+
+ The default until Ansible-2.2 was ``surrogate_or_replace``
+ From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.
+
+ :kwarg nonstring: The strategy to use if a nonstring is specified in
+ ``obj``. Default is 'simplerepr'. Valid values are:
+
+ :simplerepr: The default. This takes the ``str`` of the object and
+ then returns the bytes version of that string.
+ :empty: Return an empty byte string
+ :passthru: Return the object passed in
+ :strict: Raise a :exc:`TypeError`
+
+ :returns: Typically this returns a byte string. If a nonstring object is
+ passed in this may be a different type depending on the strategy
+ specified by nonstring. This will never return a text string.
+
+ .. note:: If passed a byte string, this function does not check that the
+ string is valid in the specified encoding. If it's important that the
+ byte string is in the specified encoding do::
+
+ encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')
+
+ .. versionchanged:: 2.3
+
+ Added the ``surrogate_then_replace`` error handler and made it the default error handler.
+ """
+ if isinstance(obj, binary_type):
+ return obj
+
+ # We're given a text string
+ # If it has surrogates, we know because it will decode
+ original_errors = errors
+ if errors in _COMPOSED_ERROR_HANDLERS:
+ if HAS_SURROGATEESCAPE:
+ errors = 'surrogateescape'
+ elif errors == 'surrogate_or_strict':
+ errors = 'strict'
+ else:
+ errors = 'replace'
+
+ if isinstance(obj, text_type):
+ try:
+ # Try this first as it's the fastest
+ return obj.encode(encoding, errors)
+ except UnicodeEncodeError:
+ if original_errors in (None, 'surrogate_then_replace'):
+ # We should only reach this if encoding was not utf-8, original_errors was
+ # 'surrogate_then_replace', and errors was 'surrogateescape'
+
+ # Slow but works
+ return_string = obj.encode('utf-8', 'surrogateescape')
+ return_string = return_string.decode('utf-8', 'replace')
+ return return_string.encode(encoding, 'replace')
+ raise
+
+ # Note: We do these last even though we have to call to_bytes again on the
+ # value because we're optimizing the common case
+ if nonstring == 'simplerepr':
+ try:
+ value = str(obj)
+ except UnicodeError:
+ try:
+ value = repr(obj)
+ except UnicodeError:
+ # Giving up
+ return to_bytes('')
+ elif nonstring == 'passthru':
+ return obj
+ elif nonstring == 'empty':
+ # python2.4 doesn't have b''
+ return to_bytes('')
+ elif nonstring == 'strict':
+ raise TypeError('obj must be a string type')
+ else:
+ raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)
+
+ return to_bytes(value, encoding, errors)
+
+
+def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
+ """Make sure that a string is a text string
+
+ :arg obj: An object to make sure is a text string. In most cases this
+ will be either a text string or a byte string. However, with
+ ``nonstring='simplerepr'``, this can be used as a traceback-free
+ version of ``str(obj)``.
+ :kwarg encoding: The encoding to use to transform from a byte string to
+ a text string. Defaults to using 'utf-8'.
+ :kwarg errors: The error handler to use if the byte string is not
+ decodable using the specified encoding. Any valid `codecs error
+ handler <https://docs.python.org/3/library/codecs.html#codec-base-classes>`_
+ may be specified. We support three additional error strategies
+ specifically aimed at helping people to port code:
+
+ :surrogate_or_strict: Will use surrogateescape if it is a valid
+ handler, otherwise it will use strict
+ :surrogate_or_replace: Will use surrogateescape if it is a valid
+ handler, otherwise it will use replace.
+ :surrogate_then_replace: Does the same as surrogate_or_replace but
+ was added for symmetry with the error handlers in
+ :func:`ansible.module_utils._text.to_bytes` (Added in Ansible 2.3)
+
+ Because surrogateescape was added in Python3 this usually means that
+ Python3 will use `surrogateescape` and Python2 will use the fallback
+ error handler. Note that the code checks for surrogateescape when the
+ module is imported. If you have a backport of `surrogateescape` for
+ python2, be sure to register the error handler prior to importing this
+ module.
+
+ The default until Ansible-2.2 was `surrogate_or_replace`
+ In Ansible-2.3 this defaults to `surrogate_then_replace` for symmetry
+ with :func:`ansible.module_utils._text.to_bytes` .
+ :kwarg nonstring: The strategy to use if a nonstring is specified in
+ ``obj``. Default is 'simplerepr'. Valid values are:
+
+ :simplerepr: The default. This takes the ``str`` of the object and
+ then returns the text version of that string.
+ :empty: Return an empty text string
+ :passthru: Return the object passed in
+ :strict: Raise a :exc:`TypeError`
+
+ :returns: Typically this returns a text string. If a nonstring object is
+ passed in this may be a different type depending on the strategy
+ specified by nonstring. This will never return a byte string.
+
+ .. versionchanged:: 2.3
+
+ Added the surrogate_then_replace error handler and made it the default error handler.
+ """
+ if isinstance(obj, text_type):
+ return obj
+
+ if errors in _COMPOSED_ERROR_HANDLERS:
+ if HAS_SURROGATEESCAPE:
+ errors = 'surrogateescape'
+ elif errors == 'surrogate_or_strict':
+ errors = 'strict'
+ else:
+ errors = 'replace'
+
+ if isinstance(obj, binary_type):
+ # Note: We don't need special handling for surrogate_then_replace
+ # because all bytes will either be made into surrogates or are valid
+ # to decode.
+ return obj.decode(encoding, errors)
+
+ # Note: We do these last even though we have to call to_text again on the
+ # value because we're optimizing the common case
+ if nonstring == 'simplerepr':
+ try:
+ value = str(obj)
+ except UnicodeError:
+ try:
+ value = repr(obj)
+ except UnicodeError:
+ # Giving up
+ return u''
+ elif nonstring == 'passthru':
+ return obj
+ elif nonstring == 'empty':
+ return u''
+ elif nonstring == 'strict':
+ raise TypeError('obj must be a string type')
+ else:
+ raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)
+
+ return to_text(value, encoding, errors)
+
+
+#: :py:func:`to_native`
+#: Transform a variable into the native str type for the python version
+#:
+#: On Python2, this is an alias for
+#: :func:`~ansible.module_utils.to_bytes`. On Python3 it is an alias for
+#: :func:`~ansible.module_utils.to_text`. It makes it easier to
+#: transform a variable into the native str type for the python version
+#: the code is running on. Use this when constructing the message to
+#: send to exceptions or when dealing with an API that needs to take
+#: a native string. Example::
+#:
+#: try:
+#: 1//0
+#: except ZeroDivisionError as e:
+#: raise MyException('Encountered an error: %s' % to_native(e))
+if PY3:
+ to_native = to_text
+else:
+ to_native = to_bytes
+
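+# Round-trip example for the converters above (illustrative; Python 3 shown):
+#
+#     >>> to_bytes(u'café')
+#     b'caf\xc3\xa9'
+#     >>> to_text(b'caf\xc3\xa9')
+#     'café'
+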
+
+def _json_encode_fallback(obj):
+ if isinstance(obj, Set):
+ return list(obj)
+ elif isinstance(obj, datetime.datetime):
+ return obj.isoformat()
+ raise TypeError("Cannot json serialize %s" % to_native(obj))
+
+
+def jsonify(data, **kwargs):
+ for encoding in ("utf-8", "latin-1"):
+ try:
+ return json.dumps(data, encoding=encoding, default=_json_encode_fallback, **kwargs)
+ # Old systems using the old simplejson module do not support the encoding keyword.
+ except TypeError:
+ try:
+ new_data = container_to_text(data, encoding=encoding)
+ except UnicodeDecodeError:
+ continue
+ return json.dumps(new_data, default=_json_encode_fallback, **kwargs)
+ except UnicodeDecodeError:
+ continue
+ raise UnicodeError('Invalid unicode encoding encountered')
+
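+# On Python 3 the first json.dumps() call rejects the legacy ``encoding``
+# keyword, so jsonify() falls through to the text-converted path
+# (illustrative):
+#
+#     >>> jsonify({'when': datetime.datetime(2019, 1, 1)})
+#     '{"when": "2019-01-01T00:00:00"}'
+
+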
+
+def container_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
+ ''' Recursively convert dict keys and values to byte str
+
+ Specialized for json return because this only handles lists, tuples,
+ and dict container types (the containers that the json module returns)
+ '''
+
+ if isinstance(d, text_type):
+ return to_bytes(d, encoding=encoding, errors=errors)
+ elif isinstance(d, dict):
+ return dict(container_to_bytes(o, encoding, errors) for o in iteritems(d))
+ elif isinstance(d, list):
+ return [container_to_bytes(o, encoding, errors) for o in d]
+ elif isinstance(d, tuple):
+ return tuple(container_to_bytes(o, encoding, errors) for o in d)
+ else:
+ return d
+
+
+def container_to_text(d, encoding='utf-8', errors='surrogate_or_strict'):
+ """Recursively convert dict keys and values to text str
+
+ Specialized for json return because this only handles lists, tuples,
+ and dict container types (the containers that the json module returns)
+ """
+
+ if isinstance(d, binary_type):
+ # Warning, can traceback
+ return to_text(d, encoding=encoding, errors=errors)
+ elif isinstance(d, dict):
+ return dict(container_to_text(o, encoding, errors) for o in iteritems(d))
+ elif isinstance(d, list):
+ return [container_to_text(o, encoding, errors) for o in d]
+ elif isinstance(d, tuple):
+ return tuple(container_to_text(o, encoding, errors) for o in d)
+ else:
+ return d
diff --git a/lib/ansible/module_utils/common/text/formatters.py b/lib/ansible/module_utils/common/text/formatters.py
new file mode 100644
index 0000000..94ca5a3
--- /dev/null
+++ b/lib/ansible/module_utils/common/text/formatters.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.six import iteritems
+
+SIZE_RANGES = {
+ 'Y': 1 << 80,
+ 'Z': 1 << 70,
+ 'E': 1 << 60,
+ 'P': 1 << 50,
+ 'T': 1 << 40,
+ 'G': 1 << 30,
+ 'M': 1 << 20,
+ 'K': 1 << 10,
+ 'B': 1,
+}
+
+
+def lenient_lowercase(lst):
+ """Lowercase elements of a list.
+
+ If an element is not a string, pass it through untouched.
+ """
+ lowered = []
+ for value in lst:
+ try:
+ lowered.append(value.lower())
+ except AttributeError:
+ lowered.append(value)
+ return lowered
+
+
+def human_to_bytes(number, default_unit=None, isbits=False):
+ """Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument.
+
+ example: human_to_bytes('10M') <=> human_to_bytes(10, 'M').
+
+ When isbits is False (default), converts bytes from a human-readable format to integer.
+ example: human_to_bytes('1MB') returns 1048576 (int).
+ The function expects 'B' (uppercase) as the byte identifier passed
+ as part of the 'number' param string or as 'default_unit', e.g. 'MB'/'KB'/etc.
+ (a single lowercase 'b' is also treated as a byte identifier).
+ If 'Mb'/'Kb'/... is passed, a ValueError will be raised.
+
+ When isbits is True, converts bits from a human-readable format to integer.
+ example: human_to_bytes('1Mb', isbits=True) returns 1048576 (int) -
+ a bit-string representation was passed and is returned as a number of bits.
+ The function expects 'b' (lowercase) as the bit identifier, e.g. 'Mb'/'Kb'/etc.
+ If 'MB'/'KB'/... is passed, a ValueError will be raised.
+ """
+ m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
+ if m is None:
+ raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
+ try:
+ num = float(m.group(1))
+ except Exception:
+ raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
+
+ unit = m.group(2)
+ if unit is None:
+ unit = default_unit
+
+ if unit is None:
+ # No unit given, returning raw number
+ return int(round(num))
+ range_key = unit[0].upper()
+ try:
+ limit = SIZE_RANGES[range_key]
+ except Exception:
+ raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
+
+ # default value
+ unit_class = 'B'
+ unit_class_name = 'byte'
+ # handling bits case
+ if isbits:
+ unit_class = 'b'
+ unit_class_name = 'bit'
+ # check unit value if more than one character (KB, MB)
+ if len(unit) > 1:
+ expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
+ if range_key == 'B':
+ expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
+
+ if unit_class_name in unit.lower():
+ pass
+ elif unit[1] != unit_class:
+ raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
+
+ return int(round(num * limit))
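+
+# Illustrative usage sketch: the case of the suffix encodes bytes vs. bits.
+#
+#   >>> human_to_bytes('2K')
+#   2048
+#   >>> human_to_bytes(10, default_unit='M')
+#   10485760
+#   >>> human_to_bytes('1Mb', isbits=True)   # lowercase 'b' means bits
+#   1048576
+#   >>> human_to_bytes('1Mb')                # bit suffix without isbits=True
+#   ValueError: ... (expect MB or M)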
+
+
+def bytes_to_human(size, isbits=False, unit=None):
+ base = 'Bytes'
+ if isbits:
+ base = 'bits'
+ suffix = ''
+
+ for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
+ if (unit is None and size >= limit) or (unit is not None and unit.upper() == suffix[0]):
+ break
+
+ if limit != 1:
+ suffix += base[0]
+ else:
+ suffix = base
+
+ return '%.2f %s' % (size / limit, suffix)
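+
+# Illustrative usage sketch:
+#
+#   >>> bytes_to_human(1048576)
+#   '1.00 MB'
+#   >>> bytes_to_human(1048576, isbits=True)
+#   '1.00 Mb'
+#   >>> bytes_to_human(512)
+#   '512.00 Bytes'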
diff --git a/lib/ansible/module_utils/common/validation.py b/lib/ansible/module_utils/common/validation.py
new file mode 100644
index 0000000..5a4cebb
--- /dev/null
+++ b/lib/ansible/module_utils/common/validation.py
@@ -0,0 +1,578 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+import re
+
+from ast import literal_eval
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common._json_compat import json
+from ansible.module_utils.common.collections import is_iterable
+from ansible.module_utils.common.text.converters import jsonify
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import (
+ binary_type,
+ integer_types,
+ string_types,
+ text_type,
+)
+
+
+def count_terms(terms, parameters):
+ """Count the number of occurrences of a key in a given dictionary
+
+ :arg terms: String or iterable of values to check
+ :arg parameters: Dictionary of parameters
+
+ :returns: An integer that is the number of occurrences of the terms values
+ in the provided dictionary.
+ """
+
+ if not is_iterable(terms):
+ terms = [terms]
+
+ return len(set(terms).intersection(parameters))
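+
+# Illustrative usage sketch: a bare string term is wrapped in a list first.
+#
+#   >>> count_terms('name', {'name': 'foo', 'path': '/tmp'})
+#   1
+#   >>> count_terms(['name', 'state'], {'name': 'foo', 'path': '/tmp'})
+#   1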
+
+
+def safe_eval(value, locals=None, include_exceptions=False):
+ # do not allow method calls to modules
+ if not isinstance(value, string_types):
+ # already templated to a data structure, perhaps?
+ if include_exceptions:
+ return (value, None)
+ return value
+ if re.search(r'\w\.\w+\(', value):
+ if include_exceptions:
+ return (value, None)
+ return value
+ # do not allow imports
+ if re.search(r'import \w+', value):
+ if include_exceptions:
+ return (value, None)
+ return value
+ try:
+ result = literal_eval(value)
+ if include_exceptions:
+ return (result, None)
+ else:
+ return result
+ except Exception as e:
+ if include_exceptions:
+ return (value, e)
+ return value
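+
+# Illustrative usage sketch: only Python literals are evaluated; anything
+# that looks like a method call or an import is returned unchanged.
+#
+#   >>> safe_eval("[1, 2, 3]")
+#   [1, 2, 3]
+#   >>> safe_eval("os.system('ls')")
+#   "os.system('ls')"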
+
+
+def check_mutually_exclusive(terms, parameters, options_context=None):
+ """Check mutually exclusive terms against argument parameters
+
+ Accepts a single list or list of lists that are groups of terms that should be
+ mutually exclusive with one another
+
+ :arg terms: List of mutually exclusive parameters
+ :arg parameters: Dictionary of parameters
+ :kwarg options_context: List of strings of parent key names if ``terms`` are
+ in a sub spec.
+
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
+ """
+
+ results = []
+ if terms is None:
+ return results
+
+ for check in terms:
+ count = count_terms(check, parameters)
+ if count > 1:
+ results.append(check)
+
+ if results:
+ full_list = ['|'.join(check) for check in results]
+ msg = "parameters are mutually exclusive: %s" % ', '.join(full_list)
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ raise TypeError(to_native(msg))
+
+ return results
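+
+# Illustrative usage sketch (parameter names are made up):
+#
+#   >>> check_mutually_exclusive([['src', 'content']], {'src': '/a'})
+#   []
+#   >>> check_mutually_exclusive([['src', 'content']], {'src': '/a', 'content': 'x'})
+#   TypeError: parameters are mutually exclusive: src|content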
+
+
+def check_required_one_of(terms, parameters, options_context=None):
+ """Check each list of terms to ensure at least one exists in the given module
+ parameters
+
+ Accepts a list of lists or tuples
+
+ :arg terms: List of lists of terms to check. For each list of terms, at
+ least one is required.
+ :arg parameters: Dictionary of parameters
+ :kwarg options_context: List of strings of parent key names if ``terms`` are
+ in a sub spec.
+
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
+ """
+
+ results = []
+ if terms is None:
+ return results
+
+ for term in terms:
+ count = count_terms(term, parameters)
+ if count == 0:
+ results.append(term)
+
+ if results:
+ for term in results:
+ msg = "one of the following is required: %s" % ', '.join(term)
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ raise TypeError(to_native(msg))
+
+ return results
+
+
+def check_required_together(terms, parameters, options_context=None):
+ """Check each list of terms to ensure every parameter in each list exists
+ in the given parameters.
+
+ Accepts a list of lists or tuples.
+
+ :arg terms: List of lists of terms to check. Each list should include
+ parameters that are all required when at least one is specified
+ in the parameters.
+ :arg parameters: Dictionary of parameters
+ :kwarg options_context: List of strings of parent key names if ``terms`` are
+ in a sub spec.
+
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
+ """
+
+ results = []
+ if terms is None:
+ return results
+
+ for term in terms:
+ counts = [count_terms(field, parameters) for field in term]
+ non_zero = [c for c in counts if c > 0]
+ if len(non_zero) > 0:
+ if 0 in counts:
+ results.append(term)
+ if results:
+ for term in results:
+ msg = "parameters are required together: %s" % ', '.join(term)
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ raise TypeError(to_native(msg))
+
+ return results
+
+
+def check_required_by(requirements, parameters, options_context=None):
+ """For each key in requirements, check the corresponding list to see if they
+ exist in parameters.
+
+ Accepts a single string or list of values for each key.
+
+ :arg requirements: Dictionary of requirements
+ :arg parameters: Dictionary of parameters
+ :kwarg options_context: List of strings of parent key names if ``requirements`` are
+ in a sub spec.
+
+ :returns: Empty dictionary or raises :class:`TypeError` if the check fails.
+ """
+
+ result = {}
+ if requirements is None:
+ return result
+
+ for (key, value) in requirements.items():
+ if key not in parameters or parameters[key] is None:
+ continue
+ result[key] = []
+ # Support strings (single-item lists)
+ if isinstance(value, string_types):
+ value = [value]
+ for required in value:
+ if required not in parameters or parameters[required] is None:
+ result[key].append(required)
+
+ if result:
+ for key, missing in result.items():
+ if len(missing) > 0:
+ msg = "missing parameter(s) required by '%s': %s" % (key, ', '.join(missing))
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ raise TypeError(to_native(msg))
+
+ return result
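+
+# Illustrative usage sketch (parameter names are made up):
+#
+#   >>> check_required_by({'subnet': ['vpc_id']}, {'subnet': 'a', 'vpc_id': 'b'})
+#   {'subnet': []}
+#   >>> check_required_by({'subnet': ['vpc_id']}, {'subnet': 'a'})
+#   TypeError: missing parameter(s) required by 'subnet': vpc_id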
+
+
+def check_required_arguments(argument_spec, parameters, options_context=None):
+ """Check all parameters in argument_spec and return a list of parameters
+ that are required but not present in parameters.
+
+ Raises :class:`TypeError` if the check fails
+
+ :arg argument_spec: Argument spec dictionary containing all parameters
+ and their specification
+ :arg parameters: Dictionary of parameters
+ :kwarg options_context: List of strings of parent key names if ``argument_spec`` is
+ in a sub spec.
+
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
+ """
+
+ missing = []
+ if argument_spec is None:
+ return missing
+
+ for (k, v) in argument_spec.items():
+ required = v.get('required', False)
+ if required and k not in parameters:
+ missing.append(k)
+
+ if missing:
+ msg = "missing required arguments: %s" % ", ".join(sorted(missing))
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ raise TypeError(to_native(msg))
+
+ return missing
+
+
+def check_required_if(requirements, parameters, options_context=None):
+ """Check parameters that are conditionally required
+
+ Raises :class:`TypeError` if the check fails
+
+ :arg requirements: List of lists specifying a parameter, value, parameters
+ required when the given parameter is the specified value, and optionally
+ a boolean indicating any or all parameters are required.
+
+ :Example:
+
+ .. code-block:: python
+
+ required_if=[
+ ['state', 'present', ('path',), True],
+ ['someint', 99, ('bool_param', 'string_param')],
+ ]
+
+ :arg parameters: Dictionary of parameters
+
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
+ The results attribute of the exception contains a list of dictionaries.
+ Each dictionary is the result of evaluating each item in requirements.
+ Each return dictionary contains the following keys:
+
+ :key missing: List of parameters that are required but missing
+ :key requires: 'any' or 'all'
+ :key parameter: Parameter name that has the requirement
+ :key value: Original value of the parameter
+ :key requirements: Original required parameters
+
+ :Example:
+
+ .. code-block:: python
+
+ [
+ {
+ 'parameter': 'someint',
+ 'value': 99,
+ 'requirements': ('bool_param', 'string_param'),
+ 'missing': ['string_param'],
+ 'requires': 'all',
+ }
+ ]
+
+ :kwarg options_context: List of strings of parent key names if ``requirements`` are
+ in a sub spec.
+ """
+ results = []
+ if requirements is None:
+ return results
+
+ for req in requirements:
+ missing = {}
+ missing['missing'] = []
+ max_missing_count = 0
+ is_one_of = False
+ if len(req) == 4:
+ key, val, requirements, is_one_of = req
+ else:
+ key, val, requirements = req
+
+ # If is_one_of is True, at least one requirement should be
+ # present; otherwise all requirements should be present.
+ if is_one_of:
+ max_missing_count = len(requirements)
+ missing['requires'] = 'any'
+ else:
+ missing['requires'] = 'all'
+
+ if key in parameters and parameters[key] == val:
+ for check in requirements:
+ count = count_terms(check, parameters)
+ if count == 0:
+ missing['missing'].append(check)
+ if len(missing['missing']) and len(missing['missing']) >= max_missing_count:
+ missing['parameter'] = key
+ missing['value'] = val
+ missing['requirements'] = requirements
+ results.append(missing)
+
+ if results:
+ for missing in results:
+ msg = "%s is %s but %s of the following are missing: %s" % (
+ missing['parameter'], missing['value'], missing['requires'], ', '.join(missing['missing']))
+ if options_context:
+ msg = "{0} found in {1}".format(msg, " -> ".join(options_context))
+ raise TypeError(to_native(msg))
+
+ return results
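+
+# Illustrative usage sketch, matching the docstring example above:
+#
+#   >>> check_required_if([['state', 'present', ('path',)]], {'state': 'present'})
+#   TypeError: state is present but all of the following are missing: path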
+
+
+def check_missing_parameters(parameters, required_parameters=None):
+ """This is for checking for required params when we can not check via
+ argspec because we need more information than is simply given in the argspec.
+
+ Raises :class:`TypeError` if any required parameters are missing
+
+ :arg parameters: Dictionary of parameters
+ :arg required_parameters: List of parameters to look for in the given parameters.
+
+ :returns: Empty list or raises :class:`TypeError` if the check fails.
+ """
+ missing_params = []
+ if required_parameters is None:
+ return missing_params
+
+ for param in required_parameters:
+ if not parameters.get(param):
+ missing_params.append(param)
+
+ if missing_params:
+ msg = "missing required arguments: %s" % ', '.join(missing_params)
+ raise TypeError(to_native(msg))
+
+ return missing_params
+
+
+# FIXME: The param and prefix parameters here are coming from AnsibleModule._check_type_string()
+ # which uses them for warning messages based on string conversion warning settings.
+# Not sure how to deal with that here since we don't have config state to query.
+def check_type_str(value, allow_conversion=True, param=None, prefix=''):
+ """Verify that the value is a string or convert to a string.
+
+ Since unexpected changes can sometimes happen when converting to a string,
+ ``allow_conversion`` controls whether the value will be converted, or a
+ TypeError raised if the value is not already a string.
+
+ :arg value: Value to validate or convert to a string
+ :arg allow_conversion: Whether to convert the string and return it or raise
+ a TypeError
+
+ :returns: Original value if it is a string, the value converted to a string
+ if allow_conversion=True, or raises a TypeError if allow_conversion=False.
+ """
+ if isinstance(value, string_types):
+ return value
+
+ if allow_conversion:
+ return to_native(value, errors='surrogate_or_strict')
+
+ msg = "'{0!r}' is not a string and conversion is not allowed".format(value)
+ raise TypeError(to_native(msg))
+
+
+def check_type_list(value):
+ """Verify that the value is a list or convert to a list
+
+ A comma-separated string will be split into a list. Raises a :class:`TypeError`
+ if unable to convert to a list.
+
+ :arg value: Value to validate or convert to a list
+
+ :returns: Original value if it is already a list, single item list if a
+ float, int, or string without commas, or a multi-item list if a
+ comma-delimited string.
+ """
+ if isinstance(value, list):
+ return value
+
+ if isinstance(value, string_types):
+ return value.split(",")
+ elif isinstance(value, int) or isinstance(value, float):
+ return [str(value)]
+
+ raise TypeError('%s cannot be converted to a list' % type(value))
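+
+# Illustrative usage sketch:
+#
+#   >>> check_type_list('a,b,c')
+#   ['a', 'b', 'c']
+#   >>> check_type_list(42)
+#   ['42']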
+
+
+def check_type_dict(value):
+ """Verify that value is a dict or convert it to a dict and return it.
+
+ Raises :class:`TypeError` if unable to convert to a dict
+
+ :arg value: Dict or string to convert to a dict. Accepts ``k1=v1, k2=v2``.
+
+ :returns: value converted to a dictionary
+ """
+ if isinstance(value, dict):
+ return value
+
+ if isinstance(value, string_types):
+ if value.startswith("{"):
+ try:
+ return json.loads(value)
+ except Exception:
+ (result, exc) = safe_eval(value, dict(), include_exceptions=True)
+ if exc is not None:
+ raise TypeError('unable to evaluate string as dictionary')
+ return result
+ elif '=' in value:
+ fields = []
+ field_buffer = []
+ in_quote = False
+ in_escape = False
+ for c in value.strip():
+ if in_escape:
+ field_buffer.append(c)
+ in_escape = False
+ elif c == '\\':
+ in_escape = True
+ elif not in_quote and c in ('\'', '"'):
+ in_quote = c
+ elif in_quote and in_quote == c:
+ in_quote = False
+ elif not in_quote and c in (',', ' '):
+ field = ''.join(field_buffer)
+ if field:
+ fields.append(field)
+ field_buffer = []
+ else:
+ field_buffer.append(c)
+
+ field = ''.join(field_buffer)
+ if field:
+ fields.append(field)
+ return dict(x.split("=", 1) for x in fields)
+ else:
+ raise TypeError("dictionary requested, could not parse JSON or key=value")
+
+ raise TypeError('%s cannot be converted to a dict' % type(value))
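+
+# Illustrative usage sketch: JSON and key=value forms are both accepted.
+#
+#   >>> check_type_dict('{"a": 1}')
+#   {'a': 1}
+#   >>> check_type_dict('a=1, b=two')
+#   {'a': '1', 'b': 'two'}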
+
+
+def check_type_bool(value):
+ """Verify that the value is a bool or convert it to a bool and return it.
+
+ Raises :class:`TypeError` if unable to convert to a bool
+
+ :arg value: String, int, or float to convert to bool. Valid booleans include:
+ '1', 'on', 1, '0', 0, 'n', 'f', 'false', 'true', 'y', 't', 'yes', 'no', 'off'
+
+ :returns: Boolean True or False
+ """
+ if isinstance(value, bool):
+ return value
+
+ if isinstance(value, string_types) or isinstance(value, (int, float)):
+ return boolean(value)
+
+ raise TypeError('%s cannot be converted to a bool' % type(value))
+
+
+def check_type_int(value):
+ """Verify that the value is an integer and return it or convert the value
+ to an integer and return it
+
+ Raises :class:`TypeError` if unable to convert to an int
+
+ :arg value: String or int to convert or verify
+
+ :return: int of given value
+ """
+ if isinstance(value, integer_types):
+ return value
+
+ if isinstance(value, string_types):
+ try:
+ return int(value)
+ except ValueError:
+ pass
+
+ raise TypeError('%s cannot be converted to an int' % type(value))
+
+
+def check_type_float(value):
+ """Verify that value is a float or convert it to a float and return it
+
+ Raises :class:`TypeError` if unable to convert to a float
+
+ :arg value: float, int, str, or bytes to verify or convert and return.
+
+ :returns: float of given value.
+ """
+ if isinstance(value, float):
+ return value
+
+ if isinstance(value, (binary_type, text_type, int)):
+ try:
+ return float(value)
+ except ValueError:
+ pass
+
+ raise TypeError('%s cannot be converted to a float' % type(value))
+
+
+def check_type_path(value):
+ """Verify the provided value is a string or convert it to a string,
+ then return the expanded path
+ """
+ value = check_type_str(value)
+ return os.path.expanduser(os.path.expandvars(value))
+
+
+def check_type_raw(value):
+ """Returns the raw value"""
+ return value
+
+
+def check_type_bytes(value):
+ """Convert a human-readable string value to bytes
+
+ Raises :class:`TypeError` if unable to convert the value
+ """
+ try:
+ return human_to_bytes(value)
+ except ValueError:
+ raise TypeError('%s cannot be converted to a Byte value' % type(value))
+
+
+def check_type_bits(value):
+ """Convert a human-readable string bits value to bits in integer.
+
+ Example: ``check_type_bits('1Mb')`` returns integer 1048576.
+
+ Raises :class:`TypeError` if unable to convert the value.
+ """
+ try:
+ return human_to_bytes(value, isbits=True)
+ except ValueError:
+ raise TypeError('%s cannot be converted to a Bit value' % type(value))
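+
+# Illustrative usage sketch: both helpers delegate to human_to_bytes().
+#
+#   >>> check_type_bytes('2K')
+#   2048
+#   >>> check_type_bits('1Mb')
+#   1048576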
+
+
+def check_type_jsonarg(value):
+ """Return a jsonified string. Sometimes the controller turns a json string
+ into a dict/list so transform it back into json here
+
+ Raises :class:`TypeError` if unable to convert the value.
+
+ """
+ if isinstance(value, (text_type, binary_type)):
+ return value.strip()
+ elif isinstance(value, (list, tuple, dict)):
+ return jsonify(value)
+ raise TypeError('%s cannot be converted to a json string' % type(value))
diff --git a/lib/ansible/module_utils/common/warnings.py b/lib/ansible/module_utils/common/warnings.py
new file mode 100644
index 0000000..9423e6a
--- /dev/null
+++ b/lib/ansible/module_utils/common/warnings.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+
+_global_warnings = []
+_global_deprecations = []
+
+
+def warn(warning):
+ if isinstance(warning, string_types):
+ _global_warnings.append(warning)
+ else:
+ raise TypeError("warn requires a string not a %s" % type(warning))
+
+
+def deprecate(msg, version=None, date=None, collection_name=None):
+ if isinstance(msg, string_types):
+ # For compatibility, we accept that neither version nor date is set,
+ # and treat that the same as if version had been set
+ if date is not None:
+ _global_deprecations.append({'msg': msg, 'date': date, 'collection_name': collection_name})
+ else:
+ _global_deprecations.append({'msg': msg, 'version': version, 'collection_name': collection_name})
+ else:
+ raise TypeError("deprecate requires a string not a %s" % type(msg))
+
+
+def get_warning_messages():
+ """Return a tuple of warning messages accumulated over this run"""
+ return tuple(_global_warnings)
+
+
+def get_deprecation_messages():
+ """Return a tuple of deprecations accumulated over this run"""
+ return tuple(_global_deprecations)
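+
+# Illustrative usage sketch; the accumulated messages are assumed to be
+# attached to the module result by the caller (not shown here).
+#
+#   >>> warn('something noteworthy happened')
+#   >>> deprecate('old_param is deprecated', version='2.14')
+#   >>> get_warning_messages()
+#   ('something noteworthy happened',)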
diff --git a/lib/ansible/module_utils/common/yaml.py b/lib/ansible/module_utils/common/yaml.py
new file mode 100644
index 0000000..e79cc09
--- /dev/null
+++ b/lib/ansible/module_utils/common/yaml.py
@@ -0,0 +1,48 @@
+# (c) 2020 Matt Martz <matt@sivel.net>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+"""
+This file provides ease of use shortcuts for loading and dumping YAML,
+preferring the YAML compiled C extensions to reduce duplicated code.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from functools import partial as _partial
+
+HAS_LIBYAML = False
+
+try:
+ import yaml as _yaml
+except ImportError:
+ HAS_YAML = False
+else:
+ HAS_YAML = True
+
+if HAS_YAML:
+ try:
+ from yaml import CSafeLoader as SafeLoader
+ from yaml import CSafeDumper as SafeDumper
+ from yaml.cyaml import CParser as Parser
+
+ HAS_LIBYAML = True
+ except (ImportError, AttributeError):
+ from yaml import SafeLoader # type: ignore[misc]
+ from yaml import SafeDumper # type: ignore[misc]
+ from yaml.parser import Parser # type: ignore[misc]
+
+ yaml_load = _partial(_yaml.load, Loader=SafeLoader)
+ yaml_load_all = _partial(_yaml.load_all, Loader=SafeLoader)
+
+ yaml_dump = _partial(_yaml.dump, Dumper=SafeDumper)
+ yaml_dump_all = _partial(_yaml.dump_all, Dumper=SafeDumper)
+else:
+ SafeLoader = object # type: ignore[assignment,misc]
+ SafeDumper = object # type: ignore[assignment,misc]
+ Parser = object # type: ignore[assignment,misc]
+
+ yaml_load = None # type: ignore[assignment]
+ yaml_load_all = None # type: ignore[assignment]
+ yaml_dump = None # type: ignore[assignment]
+ yaml_dump_all = None # type: ignore[assignment]
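+
+# Illustrative usage sketch, assuming PyYAML is installed:
+#
+#   >>> from ansible.module_utils.common.yaml import yaml_load, HAS_LIBYAML
+#   >>> yaml_load('key: value')
+#   {'key': 'value'}
+#   >>> HAS_LIBYAML   # True only when the C extension was importable
+#   True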
diff --git a/lib/ansible/module_utils/compat/__init__.py b/lib/ansible/module_utils/compat/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/compat/__init__.py
diff --git a/lib/ansible/module_utils/compat/_selectors2.py b/lib/ansible/module_utils/compat/_selectors2.py
new file mode 100644
index 0000000..be44b4b
--- /dev/null
+++ b/lib/ansible/module_utils/compat/_selectors2.py
@@ -0,0 +1,655 @@
+# This file is from the selectors2.py package. It backports the PSF Licensed
+# selectors module from the Python-3.5 stdlib to older versions of Python.
+# The author, Seth Michael Larson, dual licenses his modifications under the
+# PSF License and MIT License:
+# https://github.com/SethMichaelLarson/selectors2#license
+#
+# Copyright (c) 2016 Seth Michael Larson
+#
+# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
+# MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT)
+#
+
+
+# Backport of selectors.py from Python 3.5+ to support Python < 3.4
+# Also has the behavior specified in PEP 475 which is to retry syscalls
+# in the case of an EINTR error. This module is required because selectors34
+# does not follow this behavior and instead returns that no file descriptor
+# events have occurred rather than retry the syscall. The decision to drop
+# support for select.devpoll is made to maintain 100% test coverage.
+
+import errno
+import math
+import select
+import socket
+import sys
+import time
+from collections import namedtuple
+from ansible.module_utils.common._collections_compat import Mapping
+
+try:
+ monotonic = time.monotonic
+except (AttributeError, ImportError):  # Python < 3.3
+ monotonic = time.time
+
+__author__ = 'Seth Michael Larson'
+__email__ = 'sethmichaellarson@protonmail.com'
+__version__ = '1.1.1'
+__license__ = 'MIT'
+
+__all__ = [
+ 'EVENT_READ',
+ 'EVENT_WRITE',
+ 'SelectorError',
+ 'SelectorKey',
+ 'DefaultSelector'
+]
+
+EVENT_READ = (1 << 0)
+EVENT_WRITE = (1 << 1)
+
+HAS_SELECT = True # Variable that shows whether the platform has a selector.
+_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
+
+
+class SelectorError(Exception):
+ def __init__(self, errcode):
+ super(SelectorError, self).__init__()
+ self.errno = errcode
+
+ def __repr__(self):
+ return "<SelectorError errno={0}>".format(self.errno)
+
+ def __str__(self):
+ return self.__repr__()
+
+
+def _fileobj_to_fd(fileobj):
+ """ Return a file descriptor from a file object. If
+ given an integer will simply return that integer back. """
+ if isinstance(fileobj, int):
+ fd = fileobj
+ else:
+ try:
+ fd = int(fileobj.fileno())
+ except (AttributeError, TypeError, ValueError):
+ raise ValueError("Invalid file object: {0!r}".format(fileobj))
+ if fd < 0:
+ raise ValueError("Invalid file descriptor: {0}".format(fd))
+ return fd
+
+
+# Python 3.5 uses a more direct route to wrap system calls to increase speed.
+if sys.version_info >= (3, 5):
+ def _syscall_wrapper(func, _, *args, **kwargs):
+ """ This is the short-circuit version of the below logic
+ because in Python 3.5+ all selectors restart system calls. """
+ try:
+ return func(*args, **kwargs)
+ except (OSError, IOError, select.error) as e:
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ elif hasattr(e, "args"):
+ errcode = e.args[0]
+ raise SelectorError(errcode)
+else:
+ def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
+ """ Wrapper function for syscalls that could fail due to EINTR.
+ All functions should be retried if there is time left in the timeout
+ in accordance with PEP 475. """
+ timeout = kwargs.get("timeout", None)
+ if timeout is None:
+ expires = None
+ recalc_timeout = False
+ else:
+ timeout = float(timeout)
+ if timeout < 0.0: # Timeout less than 0 treated as no timeout.
+ expires = None
+ else:
+ expires = monotonic() + timeout
+
+ args = list(args)
+ if recalc_timeout and "timeout" not in kwargs:
+ raise ValueError(
+ "Timeout must be in args or kwargs to be recalculated")
+
+ result = _SYSCALL_SENTINEL
+ while result is _SYSCALL_SENTINEL:
+ try:
+ result = func(*args, **kwargs)
+ # OSError is thrown by select.select
+ # IOError is thrown by select.epoll.poll
+ # select.error is thrown by select.poll.poll
+ # Aren't we thankful for Python 3.x rework for exceptions?
+ except (OSError, IOError, select.error) as e:
+ # select.error wasn't a subclass of OSError in the past.
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ elif hasattr(e, "args"):
+ errcode = e.args[0]
+
+ # Also test for the Windows equivalent of EINTR.
+ is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
+ errcode == errno.WSAEINTR))
+
+ if is_interrupt:
+ if expires is not None:
+ current_time = monotonic()
+ if current_time > expires:
+ raise OSError(errno.ETIMEDOUT)
+ if recalc_timeout:
+ if "timeout" in kwargs:
+ kwargs["timeout"] = expires - current_time
+ continue
+ if errcode:
+ raise SelectorError(errcode)
+ else:
+ raise
+ return result
+
+
+SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
+
+
+class _SelectorMapping(Mapping):
+ """ Mapping of file objects to selector keys """
+
+ def __init__(self, selector):
+ self._selector = selector
+
+ def __len__(self):
+ return len(self._selector._fd_to_key)
+
+ def __getitem__(self, fileobj):
+ try:
+ fd = self._selector._fileobj_lookup(fileobj)
+ return self._selector._fd_to_key[fd]
+ except KeyError:
+ raise KeyError("{0!r} is not registered.".format(fileobj))
+
+ def __iter__(self):
+ return iter(self._selector._fd_to_key)
+
+
+class BaseSelector(object):
+ """ Abstract Selector class
+
+ A selector supports registering file objects to be monitored
+ for specific I/O events.
+
+ A file object is a file descriptor or any object with a
+ `fileno()` method. An arbitrary object can be attached to the
+ file object which can be used for example to store context info,
+ a callback, etc.
+
+ A selector can use various implementations (select(), poll(), epoll(),
+ and kqueue()) depending on the platform. The 'DefaultSelector' class uses
+ the most efficient implementation for the current platform.
+ """
+ def __init__(self):
+ # Maps file descriptors to keys.
+ self._fd_to_key = {}
+
+ # Read-only mapping returned by get_map()
+ self._map = _SelectorMapping(self)
+
+ def _fileobj_lookup(self, fileobj):
+ """ Return a file descriptor from a file object.
+ This wraps _fileobj_to_fd() to do an exhaustive
+ search in case the object is invalid but we still
+ have it in our map. Used by unregister() so we can
+ unregister an object that was previously registered
+ even if it is closed. It is also used by _SelectorMapping
+ """
+ try:
+ return _fileobj_to_fd(fileobj)
+ except ValueError:
+
+ # Search through all our mapped keys.
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ return key.fd
+
+ # Raise ValueError after all.
+ raise
+
+ def register(self, fileobj, events, data=None):
+ """ Register a file object for a set of events to monitor. """
+ if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
+ raise ValueError("Invalid events: {0!r}".format(events))
+
+ key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
+
+ if key.fd in self._fd_to_key:
+ raise KeyError("{0!r} (FD {1}) is already registered"
+ .format(fileobj, key.fd))
+
+ self._fd_to_key[key.fd] = key
+ return key
+
+ def unregister(self, fileobj):
+ """ Unregister a file object from being monitored. """
+ try:
+ key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ # Getting the fileno of a closed socket on Windows errors with EBADF.
+ except socket.error as err:
+ if err.errno != errno.EBADF:
+ raise
+ else:
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ self._fd_to_key.pop(key.fd)
+ break
+ else:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+ return key
+
+ def modify(self, fileobj, events, data=None):
+ """ Change a registered file object monitored events and data. """
+ # NOTE: Some subclasses optimize this operation even further.
+ try:
+ key = self._fd_to_key[self._fileobj_lookup(fileobj)]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ if events != key.events:
+ self.unregister(fileobj)
+ key = self.register(fileobj, events, data)
+
+ elif data != key.data:
+ # Use a shortcut to update the data.
+ key = key._replace(data=data)
+ self._fd_to_key[key.fd] = key
+
+ return key
+
+ def select(self, timeout=None):
+ """ Perform the actual selection until some monitored file objects
+ are ready or the timeout expires. """
+ raise NotImplementedError()
+
+ def close(self):
+ """ Close the selector. This must be called to ensure that all
+ underlying resources are freed. """
+ self._fd_to_key.clear()
+ self._map = None
+
+ def get_key(self, fileobj):
+ """ Return the key associated with a registered file object. """
+ mapping = self.get_map()
+ if mapping is None:
+ raise RuntimeError("Selector is closed")
+ try:
+ return mapping[fileobj]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ def get_map(self):
+ """ Return a mapping of file objects to selector keys """
+ return self._map
+
+ def _key_from_fd(self, fd):
+ """ Return the key associated to a given file descriptor
+ Return None if it is not found. """
+ try:
+ return self._fd_to_key[fd]
+ except KeyError:
+ return None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+
+# Almost all platforms have select.select()
+if hasattr(select, "select"):
+ class SelectSelector(BaseSelector):
+ """ Select-based selector. """
+ def __init__(self):
+ super(SelectSelector, self).__init__()
+ self._readers = set()
+ self._writers = set()
+
+ def register(self, fileobj, events, data=None):
+ key = super(SelectSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ self._readers.add(key.fd)
+ if events & EVENT_WRITE:
+ self._writers.add(key.fd)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(SelectSelector, self).unregister(fileobj)
+ self._readers.discard(key.fd)
+ self._writers.discard(key.fd)
+ return key
+
+ def _select(self, r, w, timeout=None):
+ """ Wrapper for select.select because timeout is a positional arg """
+ return select.select(r, w, [], timeout)
+
+ def select(self, timeout=None):
+ # Selecting on empty lists on Windows errors out.
+ if not len(self._readers) and not len(self._writers):
+ return []
+
+ timeout = None if timeout is None else max(timeout, 0.0)
+ ready = []
+ r, w, _ = _syscall_wrapper(self._select, True, self._readers,
+ self._writers, timeout=timeout)
+ r = set(r)
+ w = set(w)
+ for fd in r | w:
+ events = 0
+ if fd in r:
+ events |= EVENT_READ
+ if fd in w:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+ __all__.append('SelectSelector')
+
+
+if hasattr(select, "poll"):
+ class PollSelector(BaseSelector):
+ """ Poll-based selector """
+ def __init__(self):
+ super(PollSelector, self).__init__()
+ self._poll = select.poll()
+
+ def register(self, fileobj, events, data=None):
+ key = super(PollSelector, self).register(fileobj, events, data)
+ event_mask = 0
+ if events & EVENT_READ:
+ event_mask |= select.POLLIN
+ if events & EVENT_WRITE:
+ event_mask |= select.POLLOUT
+ self._poll.register(key.fd, event_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(PollSelector, self).unregister(fileobj)
+ self._poll.unregister(key.fd)
+ return key
+
+ def _wrap_poll(self, timeout=None):
+ """ Wrapper function for select.poll.poll() so that
+ _syscall_wrapper can work with only seconds. """
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0
+ else:
+ # select.poll.poll() has a resolution of 1 millisecond,
+ # round away from zero to wait *at least* timeout seconds.
+ timeout = math.ceil(timeout * 1e3)
+
+ result = self._poll.poll(timeout)
+ return result
+
+ def select(self, timeout=None):
+ ready = []
+ fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.POLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.POLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+
+ return ready
+
+ __all__.append('PollSelector')
+
+if hasattr(select, "epoll"):
+ class EpollSelector(BaseSelector):
+ """ Epoll-based selector """
+ def __init__(self):
+ super(EpollSelector, self).__init__()
+ self._epoll = select.epoll()
+
+ def fileno(self):
+ return self._epoll.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(EpollSelector, self).register(fileobj, events, data)
+ events_mask = 0
+ if events & EVENT_READ:
+ events_mask |= select.EPOLLIN
+ if events & EVENT_WRITE:
+ events_mask |= select.EPOLLOUT
+ _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(EpollSelector, self).unregister(fileobj)
+ try:
+ _syscall_wrapper(self._epoll.unregister, False, key.fd)
+ except SelectorError:
+ # This can occur if the fd was closed after it was registered.
+ pass
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0.0
+ else:
+ # select.epoll.poll() has a resolution of 1 millisecond
+ # but luckily takes seconds so we don't need a wrapper
+ # like PollSelector. Just for better rounding.
+ timeout = math.ceil(timeout * 1e3) * 1e-3
+ timeout = float(timeout)
+ else:
+ timeout = -1.0 # epoll.poll() must have a float.
+
+ # We always want at least 1 to ensure that select can be called
+ # with no file descriptors registered. Otherwise it will fail.
+ max_events = max(len(self._fd_to_key), 1)
+
+ ready = []
+ fd_events = _syscall_wrapper(self._epoll.poll, True,
+ timeout=timeout,
+ maxevents=max_events)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.EPOLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.EPOLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+ def close(self):
+ self._epoll.close()
+ super(EpollSelector, self).close()
+
+ __all__.append('EpollSelector')
+
+
+if hasattr(select, "devpoll"):
+ class DevpollSelector(BaseSelector):
+ """Solaris /dev/poll selector."""
+
+ def __init__(self):
+ super(DevpollSelector, self).__init__()
+ self._devpoll = select.devpoll()
+
+ def fileno(self):
+ return self._devpoll.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(DevpollSelector, self).register(fileobj, events, data)
+ poll_events = 0
+ if events & EVENT_READ:
+ poll_events |= select.POLLIN
+ if events & EVENT_WRITE:
+ poll_events |= select.POLLOUT
+ self._devpoll.register(key.fd, poll_events)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(DevpollSelector, self).unregister(fileobj)
+ self._devpoll.unregister(key.fd)
+ return key
+
+ def _wrap_poll(self, timeout=None):
+ """ Wrapper function for select.poll.poll() so that
+ _syscall_wrapper can work with only seconds. """
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0
+ else:
+ # select.devpoll.poll() has a resolution of 1 millisecond,
+ # round away from zero to wait *at least* timeout seconds.
+ timeout = math.ceil(timeout * 1e3)
+
+ result = self._devpoll.poll(timeout)
+ return result
+
+ def select(self, timeout=None):
+ ready = []
+ fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.POLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.POLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+
+ return ready
+
+ def close(self):
+ self._devpoll.close()
+ super(DevpollSelector, self).close()
+
+ __all__.append('DevpollSelector')
+
+
+if hasattr(select, "kqueue"):
+ class KqueueSelector(BaseSelector):
+ """ Kqueue / Kevent-based selector """
+ def __init__(self):
+ super(KqueueSelector, self).__init__()
+ self._kqueue = select.kqueue()
+
+ def fileno(self):
+ return self._kqueue.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(KqueueSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
+
+ if events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
+
+ return key
+
+ def unregister(self, fileobj):
+ key = super(KqueueSelector, self).unregister(fileobj)
+ if key.events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+ if key.events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ timeout = max(timeout, 0)
+
+ max_events = len(self._fd_to_key) * 2
+ ready_fds = {}
+
+ kevent_list = _syscall_wrapper(self._wrap_control, True,
+ None, max_events, timeout=timeout)
+
+ for kevent in kevent_list:
+ fd = kevent.ident
+ event_mask = kevent.filter
+ events = 0
+ if event_mask == select.KQ_FILTER_READ:
+ events |= EVENT_READ
+ if event_mask == select.KQ_FILTER_WRITE:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ if key.fd not in ready_fds:
+ ready_fds[key.fd] = (key, events & key.events)
+ else:
+ old_events = ready_fds[key.fd][1]
+ ready_fds[key.fd] = (key, (events | old_events) & key.events)
+
+ return list(ready_fds.values())
+
+ def close(self):
+ self._kqueue.close()
+ super(KqueueSelector, self).close()
+
+ def _wrap_control(self, changelist, max_events, timeout):
+ return self._kqueue.control(changelist, max_events, timeout)
+
+ __all__.append('KqueueSelector')
+
+
+# Choose the best implementation, roughly:
+# kqueue == epoll == devpoll > poll > select.
+# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
+if 'KqueueSelector' in globals(): # Platform-specific: Mac OS and BSD
+ DefaultSelector = KqueueSelector
+elif 'DevpollSelector' in globals():
+ DefaultSelector = DevpollSelector
+elif 'EpollSelector' in globals(): # Platform-specific: Linux
+ DefaultSelector = EpollSelector
+elif 'PollSelector' in globals(): # Platform-specific: Linux
+ DefaultSelector = PollSelector
+elif 'SelectSelector' in globals(): # Platform-specific: Windows
+ DefaultSelector = SelectSelector
+else: # Platform-specific: AppEngine
+ def no_selector(_):
+ raise ValueError("Platform does not have a selector")
+ DefaultSelector = no_selector
+ HAS_SELECT = False
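+
+# Illustrative usage sketch, assuming a POSIX host with socketpair():
+#
+#   import socket
+#   sel = DefaultSelector()
+#   r, w = socket.socketpair()
+#   sel.register(r, EVENT_READ, data='reader')
+#   w.send(b'x')
+#   for key, events in sel.select(timeout=1.0):
+#       assert key.data == 'reader' and events & EVENT_READ
+#   sel.close()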
diff --git a/lib/ansible/module_utils/compat/importlib.py b/lib/ansible/module_utils/compat/importlib.py
new file mode 100644
index 0000000..0b7fb2c
--- /dev/null
+++ b/lib/ansible/module_utils/compat/importlib.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2020 Matt Martz <matt@sivel.net>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+try:
+ from importlib import import_module
+except ImportError:
+ # importlib.import_module returns the tail
+ # whereas __import__ returns the head
+ # compat to work like importlib.import_module
+ def import_module(name): # type: ignore[misc]
+ __import__(name)
+ return sys.modules[name]
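+
+# Illustrative usage sketch: both code paths return the tail module.
+#
+#   >>> from ansible.module_utils.compat.importlib import import_module
+#   >>> import_module('os.path').basename('/a/b')
+#   'b'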
diff --git a/lib/ansible/module_utils/compat/paramiko.py b/lib/ansible/module_utils/compat/paramiko.py
new file mode 100644
index 0000000..85478ea
--- /dev/null
+++ b/lib/ansible/module_utils/compat/paramiko.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import types
+import warnings
+
+PARAMIKO_IMPORT_ERR = None
+
+try:
+ with warnings.catch_warnings():
+ warnings.filterwarnings('ignore', message='Blowfish has been deprecated', category=UserWarning)
+ import paramiko
+# paramiko and gssapi are incompatible and raise AttributeError not ImportError
+# When running in FIPS mode, cryptography raises InternalError
+# https://bugzilla.redhat.com/show_bug.cgi?id=1778939
+except Exception as err:
+ paramiko = None # type: types.ModuleType | None # type: ignore[no-redef]
+ PARAMIKO_IMPORT_ERR = err
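+
+# Illustrative usage sketch: callers check the sentinel instead of importing
+# paramiko themselves.
+#
+#   >>> from ansible.module_utils.compat import paramiko as paramiko_compat
+#   >>> if paramiko_compat.paramiko is None:
+#   ...     print('paramiko unavailable: %s' % paramiko_compat.PARAMIKO_IMPORT_ERR)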
diff --git a/lib/ansible/module_utils/compat/selectors.py b/lib/ansible/module_utils/compat/selectors.py
new file mode 100644
index 0000000..93ffc62
--- /dev/null
+++ b/lib/ansible/module_utils/compat/selectors.py
@@ -0,0 +1,57 @@
+# (c) 2014, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat selectors library. Python 3.4+ has this builtin. The selectors2
+package exists on PyPI to backport the functionality as far back as python-2.6.
+'''
+# The following makes it easier for us to script updates of the bundled code
+_BUNDLED_METADATA = {"pypi_name": "selectors2", "version": "1.1.1", "version_constraints": ">1.0,<2.0"}
+
+# Added these bugfix commits from 2.1.0:
+# * https://github.com/SethMichaelLarson/selectors2/commit/3bd74f2033363b606e1e849528ccaa76f5067590
+# Wrap kqueue.control so that timeout is a keyword arg
+# * https://github.com/SethMichaelLarson/selectors2/commit/6f6a26f42086d8aab273b30be492beecb373646b
+# Fix formatting of the kqueue.control patch for pylint
+# * https://github.com/SethMichaelLarson/selectors2/commit/f0c2c6c66cfa7662bc52beaf4e2d65adfa25e189
+# Fix use of OSError exception for py3 and use the wrapper of kqueue.control so retries of
+# interrupted syscalls work with kqueue
+
+import os.path
+import sys
+import types
+
+try:
+ # Python 3.4+
+ import selectors as _system_selectors
+except ImportError:
+ try:
+ # backport package installed in the system
+ import selectors2 as _system_selectors # type: ignore[no-redef]
+ except ImportError:
+ _system_selectors = None # type: types.ModuleType | None # type: ignore[no-redef]
+
+if _system_selectors:
+ selectors = _system_selectors
+else:
+ # Our bundled copy
+ from ansible.module_utils.compat import _selectors2 as selectors # type: ignore[no-redef]
+sys.modules['ansible.module_utils.compat.selectors'] = selectors
diff --git a/lib/ansible/module_utils/compat/selinux.py b/lib/ansible/module_utils/compat/selinux.py
new file mode 100644
index 0000000..7191713
--- /dev/null
+++ b/lib/ansible/module_utils/compat/selinux.py
@@ -0,0 +1,113 @@
+# Copyright: (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+from ctypes import CDLL, c_char_p, c_int, byref, POINTER, get_errno
+
+try:
+ _selinux_lib = CDLL('libselinux.so.1', use_errno=True)
+except OSError:
+ raise ImportError('unable to load libselinux.so')
+
+
+def _module_setup():
+ def _check_rc(rc):
+ if rc < 0:
+ errno = get_errno()
+ raise OSError(errno, os.strerror(errno))
+ return rc
+
+ binary_char_type = type(b'')
+
+ class _to_char_p:
+ @classmethod
+ def from_param(cls, strvalue):
+ if strvalue is not None and not isinstance(strvalue, binary_char_type):
+ strvalue = to_bytes(strvalue)
+
+ return strvalue
+
+ # FIXME: swap restype to errcheck
+
+ _funcmap = dict(
+ is_selinux_enabled={},
+ is_selinux_mls_enabled={},
+ lgetfilecon_raw=dict(argtypes=[_to_char_p, POINTER(c_char_p)], restype=_check_rc),
+ # NB: matchpathcon is deprecated and should be rewritten to use selabel_lookup (but will be a PITA)
+ matchpathcon=dict(argtypes=[_to_char_p, c_int, POINTER(c_char_p)], restype=_check_rc),
+ security_policyvers={},
+ selinux_getenforcemode=dict(argtypes=[POINTER(c_int)]),
+ security_getenforce={},
+ lsetfilecon=dict(argtypes=[_to_char_p, _to_char_p], restype=_check_rc),
+ selinux_getpolicytype=dict(argtypes=[POINTER(c_char_p)], restype=_check_rc),
+ )
+
+ _thismod = sys.modules[__name__]
+
+ for fname, cfg in _funcmap.items():
+ fn = getattr(_selinux_lib, fname, None)
+
+ if not fn:
+ raise ImportError('missing selinux function: {0}'.format(fname))
+
+ # all ctypes pointers share the same base type
+ base_ptr_type = type(POINTER(c_int))
+ fn.argtypes = cfg.get('argtypes', None)
+ fn.restype = cfg.get('restype', c_int)
+
+ # just patch simple directly callable functions directly onto the module
+ if not fn.argtypes or not any(argtype for argtype in fn.argtypes if type(argtype) == base_ptr_type):
+ setattr(_thismod, fname, fn)
+ continue
+
+ # NB: this validation code must run after all the wrappers have been declared
+ unimplemented_funcs = set(_funcmap).difference(dir(_thismod))
+ if unimplemented_funcs:
+ raise NotImplementedError('implementation is missing functions: {0}'.format(unimplemented_funcs))
+
+
+# begin wrapper function impls
+
+def selinux_getenforcemode():
+ enforcemode = c_int()
+ rc = _selinux_lib.selinux_getenforcemode(byref(enforcemode))
+ return [rc, enforcemode.value]
+
+
+def selinux_getpolicytype():
+ con = c_char_p()
+ try:
+ rc = _selinux_lib.selinux_getpolicytype(byref(con))
+ return [rc, to_native(con.value)]
+ finally:
+ _selinux_lib.freecon(con)
+
+
+def lgetfilecon_raw(path):
+ con = c_char_p()
+ try:
+ rc = _selinux_lib.lgetfilecon_raw(path, byref(con))
+ return [rc, to_native(con.value)]
+ finally:
+ _selinux_lib.freecon(con)
+
+
+def matchpathcon(path, mode):
+ con = c_char_p()
+ try:
+ rc = _selinux_lib.matchpathcon(path, mode, byref(con))
+ return [rc, to_native(con.value)]
+ finally:
+ _selinux_lib.freecon(con)
+
+
+_module_setup()
+del _module_setup
+
+# end wrapper function impls
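+
+# Illustrative usage sketch, assuming libselinux.so.1 is present (the import
+# fails otherwise); the pointer-returning wrappers return [rc, value] lists:
+#
+#   >>> from ansible.module_utils.compat import selinux
+#   >>> selinux.is_selinux_enabled()   # 1 when SELinux is enabled, 0 otherwise
+#   0
+#   >>> rc, con = selinux.lgetfilecon_raw('/etc/passwd')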
diff --git a/lib/ansible/module_utils/compat/typing.py b/lib/ansible/module_utils/compat/typing.py
new file mode 100644
index 0000000..27b25f7
--- /dev/null
+++ b/lib/ansible/module_utils/compat/typing.py
@@ -0,0 +1,25 @@
+"""Compatibility layer for the `typing` module, providing all Python versions access to the newest type-hinting features."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# pylint: disable=wildcard-import,unused-wildcard-import
+
+# catch *all* exceptions to prevent type annotation support module bugs causing runtime failures
+# (eg, https://github.com/ansible/ansible/issues/77857)
+
+try:
+ from typing_extensions import *
+except Exception: # pylint: disable=broad-except
+ pass
+
+try:
+ from typing import * # type: ignore[misc]
+except Exception: # pylint: disable=broad-except
+ pass
+
+
+try:
+ cast
+except NameError:
+ def cast(typ, val): # type: ignore[no-redef]
+ return val
diff --git a/lib/ansible/module_utils/compat/version.py b/lib/ansible/module_utils/compat/version.py
new file mode 100644
index 0000000..f4db1ef
--- /dev/null
+++ b/lib/ansible/module_utils/compat/version.py
@@ -0,0 +1,343 @@
+# Vendored copy of distutils/version.py from CPython 3.9.5
+#
+# Implements multiple version numbering conventions for the
+# Python Module Distribution Utilities.
+#
+# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
+#
+
+"""Provides classes to represent module version numbers (one class for
+each style of version numbering). There are currently two such classes
+implemented: StrictVersion and LooseVersion.
+
+Every version number class implements the following interface:
+ * the 'parse' method takes a string and parses it to some internal
+ representation; if the string is an invalid version number,
+ 'parse' raises a ValueError exception
+ * the class constructor takes an optional string argument which,
+ if supplied, is passed to 'parse'
+ * __str__ reconstructs the string that was passed to 'parse' (or
+ an equivalent string -- ie. one that will generate an equivalent
+ version number instance)
+ * __repr__ generates Python code to recreate the version number instance
+ * _cmp compares the current instance with either another instance
+ of the same class or a string (which will be parsed to an instance
+ of the same class, thus must follow the same rules)
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+try:
+ RE_FLAGS = re.VERBOSE | re.ASCII # type: ignore[attr-defined]
+except AttributeError:
+ RE_FLAGS = re.VERBOSE
+
+
+class Version:
+ """Abstract base class for version numbering classes. Just provides
+ constructor (__init__) and reproducer (__repr__), because those
+ seem to be the same for all version numbering classes; and route
+ rich comparisons to _cmp.
+ """
+
+ def __init__(self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def __repr__(self):
+ return "%s ('%s')" % (self.__class__.__name__, str(self))
+
+ def __eq__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c == 0
+
+ def __lt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c < 0
+
+ def __le__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c <= 0
+
+ def __gt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c > 0
+
+ def __ge__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c >= 0
+
+
+# Interface for version-number classes -- must be implemented
+# by the following classes (the concrete ones -- Version should
+# be treated as an abstract class).
+# __init__ (string) - create and take same action as 'parse'
+# (string parameter is optional)
+# parse (string) - convert a string representation to whatever
+# internal representation is appropriate for
+# this style of version numbering
+# __str__ (self) - convert back to a string; should be very similar
+# (if not identical to) the string supplied to parse
+# __repr__ (self) - generate Python code to recreate
+# the instance
+# _cmp (self, other) - compare two version numbers ('other' may
+# be an unparsed version string, or another
+# instance of your version class)
+
+
+class StrictVersion(Version):
+ """Version numbering for anal retentives and software idealists.
+ Implements the standard interface for version number classes as
+ described above. A version number consists of two or three
+ dot-separated numeric components, with an optional "pre-release" tag
+ on the end. The pre-release tag consists of the letter 'a' or 'b'
+ followed by a number. If the numeric components of two version
+ numbers are equal, then one with a pre-release tag will always
+ be deemed earlier (lesser) than one without.
+
+ The following are valid version numbers (shown in the order that
+ would be obtained by sorting according to the supplied cmp function):
+
+ 0.4 0.4.0 (these two are equivalent)
+ 0.4.1
+ 0.5a1
+ 0.5b3
+ 0.5
+ 0.9.6
+ 1.0
+ 1.0.4a3
+ 1.0.4b1
+ 1.0.4
+
+ The following are examples of invalid version numbers:
+
+ 1
+ 2.7.2.2
+ 1.3.a4
+ 1.3pl1
+ 1.3c4
+
+ The rationale for this version numbering system will be explained
+ in the distutils documentation.
+ """
+
+ version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
+ RE_FLAGS)
+
+ def parse(self, vstring):
+ match = self.version_re.match(vstring)
+ if not match:
+ raise ValueError("invalid version number '%s'" % vstring)
+
+ (major, minor, patch, prerelease, prerelease_num) = \
+ match.group(1, 2, 4, 5, 6)
+
+ if patch:
+ self.version = tuple(map(int, [major, minor, patch]))
+ else:
+ self.version = tuple(map(int, [major, minor])) + (0,)
+
+ if prerelease:
+ self.prerelease = (prerelease[0], int(prerelease_num))
+ else:
+ self.prerelease = None
+
+ def __str__(self):
+ if self.version[2] == 0:
+ vstring = '.'.join(map(str, self.version[0:2]))
+ else:
+ vstring = '.'.join(map(str, self.version))
+
+ if self.prerelease:
+ vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
+
+ return vstring
+
+ def _cmp(self, other):
+ if isinstance(other, str):
+ other = StrictVersion(other)
+ elif not isinstance(other, StrictVersion):
+ return NotImplemented
+
+ if self.version != other.version:
+ # numeric versions don't match
+ # prerelease stuff doesn't matter
+ if self.version < other.version:
+ return -1
+ else:
+ return 1
+
+ # have to compare prerelease
+ # case 1: neither has prerelease; they're equal
+ # case 2: self has prerelease, other doesn't; other is greater
+ # case 3: self doesn't have prerelease, other does: self is greater
+ # case 4: both have prerelease: must compare them!
+
+ if (not self.prerelease and not other.prerelease):
+ return 0
+ elif (self.prerelease and not other.prerelease):
+ return -1
+ elif (not self.prerelease and other.prerelease):
+ return 1
+ elif (self.prerelease and other.prerelease):
+ if self.prerelease == other.prerelease:
+ return 0
+ elif self.prerelease < other.prerelease:
+ return -1
+ else:
+ return 1
+ else:
+ raise AssertionError("never get here")
+
+# end class StrictVersion
+
+# The rules according to Greg Stein:
+# 1) a version number has 1 or more numbers separated by a period or by
+# sequences of letters. If only periods, then these are compared
+# left-to-right to determine an ordering.
+# 2) sequences of letters are part of the tuple for comparison and are
+# compared lexicographically
+# 3) recognize that numeric components may have leading zeroes
+#
+# The LooseVersion class below implements these rules: a version number
+# string is split up into a tuple of integer and string components, and
+# comparison is a simple tuple comparison. This means that version
+# numbers behave in a predictable and obvious way, but a way that might
+# not necessarily be how people *want* version numbers to behave. There
+# wouldn't be a problem if people could stick to purely numeric version
+# numbers: just split on period and compare the numbers as tuples.
+# However, people insist on putting letters into their version numbers;
+# the most common purpose seems to be:
+# - indicating a "pre-release" version
+# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
+# - indicating a post-release patch ('p', 'pl', 'patch')
+# but of course this can't cover all version number schemes, and there's
+# no way to know what a programmer means without asking them.
+#
+# The problem is what to do with letters (and other non-numeric
+# characters) in a version number. The current implementation does the
+# obvious and predictable thing: keep them as strings and compare
+# lexically within a tuple comparison. This has the desired effect if
+# an appended letter sequence implies something "post-release":
+# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
+#
+# However, if letters in a version number imply a pre-release version,
+# the "obvious" thing isn't correct. Eg. you would expect that
+# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
+# implemented here, this just isn't so.
+#
+# Two possible solutions come to mind. The first is to tie the
+# comparison algorithm to a particular set of semantic rules, as has
+# been done in the StrictVersion class above. This works great as long
+# as everyone can go along with bondage and discipline. Hopefully a
+# (large) subset of Python module programmers will agree that the
+# particular flavour of bondage and discipline provided by StrictVersion
+# provides enough benefit to be worth using, and will submit their
+# version numbering scheme to its domination. The free-thinking
+# anarchists in the lot will never give in, though, and something needs
+# to be done to accommodate them.
+#
+# Perhaps a "moderately strict" version class could be implemented that
+# lets almost anything slide (syntactically), and makes some heuristic
+# assumptions about non-digits in version number strings. This could
+# sink into special-case-hell, though; if I was as talented and
+# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
+# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
+# just as happy dealing with things like "2g6" and "1.13++". I don't
+# think I'm smart enough to do it right though.
+#
+# In any case, I've coded the test suite for this module (see
+# ../test/test_version.py) specifically to fail on things like comparing
+# "1.2a2" and "1.2". That's not because the *code* is doing anything
+# wrong, it's because the simple, obvious design doesn't match my
+# complicated, hairy expectations for real-world version numbers. It
+# would be a snap to fix the test suite to say, "Yep, LooseVersion does
+# the Right Thing" (ie. the code matches the conception). But I'd rather
+# have a conception that matches common notions about version numbers.
+
+
+class LooseVersion(Version):
+ """Version numbering for anarchists and software realists.
+ Implements the standard interface for version number classes as
+ described above. A version number consists of a series of numbers,
+ separated by either periods or strings of letters. When comparing
+ version numbers, the numeric components will be compared
+ numerically, and the alphabetic components lexically. The following
+ are all valid version numbers, in no particular order:
+
+ 1.5.1
+ 1.5.2b2
+ 161
+ 3.10a
+ 8.02
+ 3.4j
+ 1996.07.12
+ 3.2.pl0
+ 3.1.1.6
+ 2g6
+ 11g
+ 0.960923
+ 2.2beta29
+ 1.13++
+ 5.5.kw
+ 2.0b1pl0
+
+ In fact, there is no such thing as an invalid version number under
+ this scheme; the rules for comparison are simple and predictable,
+ but may not always give the results you want (for some definition
+ of "want").
+ """
+
+ component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
+
+ def __init__(self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def parse(self, vstring):
+ # I've given up on thinking I can reconstruct the version string
+ # from the parsed tuple -- so I just store the string here for
+ # use by __str__
+ self.vstring = vstring
+ components = [x for x in self.component_re.split(vstring) if x and x != '.']
+ for i, obj in enumerate(components):
+ try:
+ components[i] = int(obj)
+ except ValueError:
+ pass
+
+ self.version = components
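+ # e.g. parse('1.5.2b2') yields version == [1, 5, 2, 'b', 2]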
+
+ def __str__(self):
+ return self.vstring
+
+ def __repr__(self):
+ return "LooseVersion ('%s')" % str(self)
+
+ def _cmp(self, other):
+ if isinstance(other, str):
+ other = LooseVersion(other)
+ elif not isinstance(other, LooseVersion):
+ return NotImplemented
+
+ if self.version == other.version:
+ return 0
+ if self.version < other.version:
+ return -1
+ if self.version > other.version:
+ return 1
+
+# end class LooseVersion
diff --git a/lib/ansible/module_utils/connection.py b/lib/ansible/module_utils/connection.py
new file mode 100644
index 0000000..1396c1c
--- /dev/null
+++ b/lib/ansible/module_utils/connection.py
@@ -0,0 +1,222 @@
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import hashlib
+import json
+import socket
+import struct
+import traceback
+import uuid
+
+from functools import partial
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.common.json import AnsibleJSONEncoder
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import cPickle
+
+
+def write_to_file_descriptor(fd, obj):
+ """Handles making sure all data is properly written to file descriptor fd.
+
+ In particular, that data is encoded in a character stream-friendly way and
+ that all data gets written before returning.
+ """
+ # Need to force a protocol that is compatible with both py2 and py3.
+ # That would be protocol=2 or less.
+ # Also need to force a protocol that excludes certain control chars as
+ # stdin in this case is a pty and control chars will cause problems.
+ # That means only protocol=0 will work.
+ src = cPickle.dumps(obj, protocol=0)
+
+ # raw \r characters will not survive pty round-trip
+ # They should be rehydrated on the receiving end
+ src = src.replace(b'\r', br'\r')
+ data_hash = to_bytes(hashlib.sha1(src).hexdigest())
+
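+ # Wire format (illustrative): b'<decimal length>\n' + payload + b'<40-hex sha1>\n'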
+ os.write(fd, b'%d\n' % len(src))
+ os.write(fd, src)
+ os.write(fd, b'%s\n' % data_hash)
+
+
+def send_data(s, data):
+ packed_len = struct.pack('!Q', len(data))
+ return s.sendall(packed_len + data)
+
+
+def recv_data(s):
+ header_len = 8 # size of a packed unsigned long long
+ data = to_bytes("")
+ while len(data) < header_len:
+ d = s.recv(header_len - len(data))
+ if not d:
+ return None
+ data += d
+ data_len = struct.unpack('!Q', data[:header_len])[0]
+ data = data[header_len:]
+ while len(data) < data_len:
+ d = s.recv(data_len - len(data))
+ if not d:
+ return None
+ data += d
+ return data
+
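+# Framing note (illustrative): send_data prefixes an 8-byte big-endian length,
+# so a 5-byte payload b'hello' goes out as struct.pack('!Q', 5) + b'hello',
+# and recv_data reassembles it on the other side.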
+
+def exec_command(module, command):
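+ # Returns a (rc, stdout, stderr) tuple; rc is 0 on success.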
+ connection = Connection(module._socket_path)
+ try:
+ out = connection.exec_command(command)
+ except ConnectionError as exc:
+ code = getattr(exc, 'code', 1)
+ message = getattr(exc, 'err', exc)
+ return code, '', to_text(message, errors='surrogate_then_replace')
+ return 0, out, ''
+
+
+def request_builder(method_, *args, **kwargs):
+ reqid = str(uuid.uuid4())
+ req = {'jsonrpc': '2.0', 'method': method_, 'id': reqid}
+ req['params'] = (args, kwargs)
+
+ return req
+
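+# e.g. request_builder('exec_command', 'show version') returns (illustrative):
+# {'jsonrpc': '2.0', 'method': 'exec_command', 'id': '<uuid4 str>',
+#  'params': (('show version',), {})}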
+
+class ConnectionError(Exception):
+
+ def __init__(self, message, *args, **kwargs):
+ super(ConnectionError, self).__init__(message)
+ for k, v in iteritems(kwargs):
+ setattr(self, k, v)
+
+
+class Connection(object):
+
+ def __init__(self, socket_path):
+ if socket_path is None:
+ raise AssertionError('socket_path must be a value')
+ self.socket_path = socket_path
+
+ def __getattr__(self, name):
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ if name.startswith('_'):
+ raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
+ return partial(self.__rpc__, name)
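+ # e.g. conn.run_commands(...) (a hypothetical RPC name) resolves here and
+ # is dispatched as self.__rpc__('run_commands', ...)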
+
+ def _exec_jsonrpc(self, name, *args, **kwargs):
+
+ req = request_builder(name, *args, **kwargs)
+ reqid = req['id']
+
+ if not os.path.exists(self.socket_path):
+ raise ConnectionError(
+ 'socket path %s does not exist or cannot be found. See Troubleshooting socket '
+ 'path issues in the Network Debug and Troubleshooting Guide' % self.socket_path
+ )
+
+ try:
+ data = json.dumps(req, cls=AnsibleJSONEncoder, vault_to_text=True)
+ except TypeError as exc:
+ raise ConnectionError(
+ "Failed to encode some variables as JSON for communication with ansible-connection. "
+ "The original exception was: %s" % to_text(exc)
+ )
+
+ try:
+ out = self.send(data)
+ except socket.error as e:
+ raise ConnectionError(
+ 'unable to connect to socket %s. See Troubleshooting socket path issues '
+ 'in the Network Debug and Troubleshooting Guide' % self.socket_path,
+ err=to_text(e, errors='surrogate_then_replace'), exception=traceback.format_exc()
+ )
+
+ try:
+ response = json.loads(out)
+ except ValueError:
+ # set_option(s) has sensitive info, and the details are unlikely to matter anyway
+ if name.startswith("set_option"):
+ raise ConnectionError(
+ "Unable to decode JSON from response to {0}. Received '{1}'.".format(name, out)
+ )
+ params = [repr(arg) for arg in args] + ['{0}={1!r}'.format(k, v) for k, v in iteritems(kwargs)]
+ params = ', '.join(params)
+ raise ConnectionError(
+ "Unable to decode JSON from response to {0}({1}). Received '{2}'.".format(name, params, out)
+ )
+
+ if response['id'] != reqid:
+ raise ConnectionError('invalid json-rpc id received')
+ if "result_type" in response:
+ response["result"] = cPickle.loads(to_bytes(response["result"]))
+
+ return response
+
+ def __rpc__(self, name, *args, **kwargs):
+ """Executes the json-rpc and returns the output received
+ from remote device.
+ :name: rpc method to be executed over connection plugin that implements jsonrpc 2.0
+ :args: Ordered list of params passed as arguments to rpc method
+ :kwargs: Dict of valid key, value pairs passed as arguments to rpc method
+
+ For usage refer the respective connection plugin docs.
+ """
+
+ response = self._exec_jsonrpc(name, *args, **kwargs)
+
+ if 'error' in response:
+ err = response.get('error')
+ msg = err.get('data') or err['message']
+ code = err['code']
+ raise ConnectionError(to_text(msg, errors='surrogate_then_replace'), code=code)
+
+ return response['result']
+
+ def send(self, data):
+ try:
+ sf = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sf.connect(self.socket_path)
+
+ send_data(sf, to_bytes(data))
+ response = recv_data(sf)
+
+ except socket.error as e:
+ sf.close()
+ raise ConnectionError(
+ 'unable to connect to socket %s. See the socket path issue category in '
+ 'Network Debug and Troubleshooting Guide' % self.socket_path,
+ err=to_text(e, errors='surrogate_then_replace'), exception=traceback.format_exc()
+ )
+
+ sf.close()
+
+ return to_text(response, errors='surrogate_or_strict')
diff --git a/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs b/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs
new file mode 100644
index 0000000..48c4a19
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs
@@ -0,0 +1,460 @@
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Security.Principal;
+using System.Text;
+
+namespace Ansible.AccessToken
+{
+ internal class NativeHelpers
+ {
+ [StructLayout(LayoutKind.Sequential)]
+ public struct LUID_AND_ATTRIBUTES
+ {
+ public Luid Luid;
+ public UInt32 Attributes;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SID_AND_ATTRIBUTES
+ {
+ public IntPtr Sid;
+ public int Attributes;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_PRIVILEGES
+ {
+ public UInt32 PrivilegeCount;
+ [MarshalAs(UnmanagedType.ByValArray, SizeConst = 1)]
+ public LUID_AND_ATTRIBUTES[] Privileges;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_USER
+ {
+ public SID_AND_ATTRIBUTES User;
+ }
+
+ public enum TokenInformationClass : uint
+ {
+ TokenUser = 1,
+ TokenPrivileges = 3,
+ TokenStatistics = 10,
+ TokenElevationType = 18,
+ TokenLinkedToken = 19,
+ }
+ }
+
+ internal class NativeMethods
+ {
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool CloseHandle(
+ IntPtr hObject);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool DuplicateTokenEx(
+ SafeNativeHandle hExistingToken,
+ TokenAccessLevels dwDesiredAccess,
+ IntPtr lpTokenAttributes,
+ SecurityImpersonationLevel ImpersonationLevel,
+ TokenType TokenType,
+ out SafeNativeHandle phNewToken);
+
+ [DllImport("kernel32.dll")]
+ public static extern SafeNativeHandle GetCurrentProcess();
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool GetTokenInformation(
+ SafeNativeHandle TokenHandle,
+ NativeHelpers.TokenInformationClass TokenInformationClass,
+ SafeMemoryBuffer TokenInformation,
+ UInt32 TokenInformationLength,
+ out UInt32 ReturnLength);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool ImpersonateLoggedOnUser(
+ SafeNativeHandle hToken);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool LogonUserW(
+ string lpszUsername,
+ string lpszDomain,
+ string lpszPassword,
+ LogonType dwLogonType,
+ LogonProvider dwLogonProvider,
+ out SafeNativeHandle phToken);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool LookupPrivilegeNameW(
+ string lpSystemName,
+ ref Luid lpLuid,
+ StringBuilder lpName,
+ ref UInt32 cchName);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern SafeNativeHandle OpenProcess(
+ ProcessAccessFlags dwDesiredAccess,
+ bool bInheritHandle,
+ UInt32 dwProcessId);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool OpenProcessToken(
+ SafeNativeHandle ProcessHandle,
+ TokenAccessLevels DesiredAccess,
+ out SafeNativeHandle TokenHandle);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool RevertToSelf();
+ }
+
+ internal class SafeMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeMemoryBuffer() : base(true) { }
+ public SafeMemoryBuffer(int cb) : base(true)
+ {
+ base.SetHandle(Marshal.AllocHGlobal(cb));
+ }
+ public SafeMemoryBuffer(IntPtr handle) : base(true)
+ {
+ base.SetHandle(handle);
+ }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ Marshal.FreeHGlobal(handle);
+ return true;
+ }
+ }
+
+ public enum LogonProvider
+ {
+ Default,
+ WinNT35,
+ WinNT40,
+ WinNT50,
+ }
+
+ public enum LogonType
+ {
+ Interactive = 2,
+ Network = 3,
+ Batch = 4,
+ Service = 5,
+ Unlock = 7,
+ NetworkCleartext = 8,
+ NewCredentials = 9,
+ }
+
+ [Flags]
+ public enum PrivilegeAttributes : uint
+ {
+ Disabled = 0x00000000,
+ EnabledByDefault = 0x00000001,
+ Enabled = 0x00000002,
+ Removed = 0x00000004,
+ UsedForAccess = 0x80000000,
+ }
+
+ [Flags]
+ public enum ProcessAccessFlags : uint
+ {
+ Terminate = 0x00000001,
+ CreateThread = 0x00000002,
+ VmOperation = 0x00000008,
+ VmRead = 0x00000010,
+ VmWrite = 0x00000020,
+ DupHandle = 0x00000040,
+ CreateProcess = 0x00000080,
+ SetQuota = 0x00000100,
+ SetInformation = 0x00000200,
+ QueryInformation = 0x00000400,
+ SuspendResume = 0x00000800,
+ QueryLimitedInformation = 0x00001000,
+ Delete = 0x00010000,
+ ReadControl = 0x00020000,
+ WriteDac = 0x00040000,
+ WriteOwner = 0x00080000,
+ Synchronize = 0x00100000,
+ }
+
+ public enum SecurityImpersonationLevel
+ {
+ Anonymous,
+ Identification,
+ Impersonation,
+ Delegation,
+ }
+
+ public enum TokenElevationType
+ {
+ Default = 1,
+ Full,
+ Limited,
+ }
+
+ public enum TokenType
+ {
+ Primary = 1,
+ Impersonation,
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct Luid
+ {
+ public UInt32 LowPart;
+ public Int32 HighPart;
+
+ public static explicit operator UInt64(Luid l)
+ {
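+ // e.g. HighPart = 1, LowPart = 2 combine to 0x0000000100000002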
+ return (UInt64)((UInt64)l.HighPart << 32) | (UInt64)l.LowPart;
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TokenStatistics
+ {
+ public Luid TokenId;
+ public Luid AuthenticationId;
+ public Int64 ExpirationTime;
+ public TokenType TokenType;
+ public SecurityImpersonationLevel ImpersonationLevel;
+ public UInt32 DynamicCharged;
+ public UInt32 DynamicAvailable;
+ public UInt32 GroupCount;
+ public UInt32 PrivilegeCount;
+ public Luid ModifiedId;
+ }
+
+ public class PrivilegeInfo
+ {
+ public string Name;
+ public PrivilegeAttributes Attributes;
+
+ internal PrivilegeInfo(NativeHelpers.LUID_AND_ATTRIBUTES la)
+ {
+ Name = TokenUtil.GetPrivilegeName(la.Luid);
+ Attributes = (PrivilegeAttributes)la.Attributes;
+ }
+ }
+
+ public class SafeNativeHandle : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeNativeHandle() : base(true) { }
+ public SafeNativeHandle(IntPtr handle) : base(true) { this.handle = handle; }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ return NativeMethods.CloseHandle(handle);
+ }
+ }
+
+ public class Win32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+
+ public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+ public Win32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2} - 0x{2:X8})", message, base.Message, errorCode);
+ }
+
+ public override string Message { get { return _msg; } }
+ public static explicit operator Win32Exception(string message) { return new Win32Exception(message); }
+ }
+
+ public class TokenUtil
+ {
+ public static SafeNativeHandle DuplicateToken(SafeNativeHandle hToken, TokenAccessLevels access,
+ SecurityImpersonationLevel impersonationLevel, TokenType tokenType)
+ {
+ SafeNativeHandle dupToken;
+ if (!NativeMethods.DuplicateTokenEx(hToken, access, IntPtr.Zero, impersonationLevel, tokenType, out dupToken))
+ throw new Win32Exception("Failed to duplicate token");
+ return dupToken;
+ }
+
+ public static SecurityIdentifier GetTokenUser(SafeNativeHandle hToken)
+ {
+ using (SafeMemoryBuffer tokenInfo = GetTokenInformation(hToken,
+ NativeHelpers.TokenInformationClass.TokenUser))
+ {
+ NativeHelpers.TOKEN_USER tokenUser = (NativeHelpers.TOKEN_USER)Marshal.PtrToStructure(
+ tokenInfo.DangerousGetHandle(),
+ typeof(NativeHelpers.TOKEN_USER));
+ return new SecurityIdentifier(tokenUser.User.Sid);
+ }
+ }
+
+ public static List<PrivilegeInfo> GetTokenPrivileges(SafeNativeHandle hToken)
+ {
+ using (SafeMemoryBuffer tokenInfo = GetTokenInformation(hToken,
+ NativeHelpers.TokenInformationClass.TokenPrivileges))
+ {
+ NativeHelpers.TOKEN_PRIVILEGES tokenPrivs = (NativeHelpers.TOKEN_PRIVILEGES)Marshal.PtrToStructure(
+ tokenInfo.DangerousGetHandle(),
+ typeof(NativeHelpers.TOKEN_PRIVILEGES));
+
+ NativeHelpers.LUID_AND_ATTRIBUTES[] luidAttrs =
+ new NativeHelpers.LUID_AND_ATTRIBUTES[tokenPrivs.PrivilegeCount];
+ PtrToStructureArray(luidAttrs, IntPtr.Add(tokenInfo.DangerousGetHandle(),
+ Marshal.SizeOf(tokenPrivs.PrivilegeCount)));
+
+ return luidAttrs.Select(la => new PrivilegeInfo(la)).ToList();
+ }
+ }
+
+ public static TokenStatistics GetTokenStatistics(SafeNativeHandle hToken)
+ {
+ using (SafeMemoryBuffer tokenInfo = GetTokenInformation(hToken,
+ NativeHelpers.TokenInformationClass.TokenStatistics))
+ {
+ TokenStatistics tokenStats = (TokenStatistics)Marshal.PtrToStructure(
+ tokenInfo.DangerousGetHandle(),
+ typeof(TokenStatistics));
+ return tokenStats;
+ }
+ }
+
+ public static TokenElevationType GetTokenElevationType(SafeNativeHandle hToken)
+ {
+ using (SafeMemoryBuffer tokenInfo = GetTokenInformation(hToken,
+ NativeHelpers.TokenInformationClass.TokenElevationType))
+ {
+ return (TokenElevationType)Marshal.ReadInt32(tokenInfo.DangerousGetHandle());
+ }
+ }
+
+ public static SafeNativeHandle GetTokenLinkedToken(SafeNativeHandle hToken)
+ {
+ using (SafeMemoryBuffer tokenInfo = GetTokenInformation(hToken,
+ NativeHelpers.TokenInformationClass.TokenLinkedToken))
+ {
+ return new SafeNativeHandle(Marshal.ReadIntPtr(tokenInfo.DangerousGetHandle()));
+ }
+ }
+
+ public static IEnumerable<SafeNativeHandle> EnumerateUserTokens(SecurityIdentifier sid,
+ TokenAccessLevels access = TokenAccessLevels.Query)
+ {
+ foreach (System.Diagnostics.Process process in System.Diagnostics.Process.GetProcesses())
+ {
+ // We always need the Query access level so we can query the TokenUser
+ using (process)
+ using (SafeNativeHandle hToken = TryOpenAccessToken(process, access | TokenAccessLevels.Query))
+ {
+ if (hToken == null)
+ continue;
+
+ if (!sid.Equals(GetTokenUser(hToken)))
+ continue;
+
+ yield return hToken;
+ }
+ }
+ }
+
+ public static void ImpersonateToken(SafeNativeHandle hToken)
+ {
+ if (!NativeMethods.ImpersonateLoggedOnUser(hToken))
+ throw new Win32Exception("Failed to impersonate token");
+ }
+
+ public static SafeNativeHandle LogonUser(string username, string domain, string password, LogonType logonType,
+ LogonProvider logonProvider)
+ {
+ SafeNativeHandle hToken;
+ if (!NativeMethods.LogonUserW(username, domain, password, logonType, logonProvider, out hToken))
+ throw new Win32Exception(String.Format("Failed to logon {0}",
+ String.IsNullOrEmpty(domain) ? username : domain + "\\" + username));
+
+ return hToken;
+ }
+
+ public static SafeNativeHandle OpenProcess()
+ {
+ return NativeMethods.GetCurrentProcess();
+ }
+
+ public static SafeNativeHandle OpenProcess(Int32 pid, ProcessAccessFlags access, bool inherit)
+ {
+ SafeNativeHandle hProcess = NativeMethods.OpenProcess(access, inherit, (UInt32)pid);
+ if (hProcess.IsInvalid)
+ throw new Win32Exception(String.Format("Failed to open process {0} with access {1}",
+ pid, access.ToString()));
+
+ return hProcess;
+ }
+
+ public static SafeNativeHandle OpenProcessToken(SafeNativeHandle hProcess, TokenAccessLevels access)
+ {
+ SafeNativeHandle hToken;
+ if (!NativeMethods.OpenProcessToken(hProcess, access, out hToken))
+ throw new Win32Exception(String.Format("Failed to open process token with access {0}",
+ access.ToString()));
+
+ return hToken;
+ }
+
+ public static void RevertToSelf()
+ {
+ if (!NativeMethods.RevertToSelf())
+ throw new Win32Exception("Failed to revert thread impersonation");
+ }
+
+ internal static string GetPrivilegeName(Luid luid)
+ {
+ UInt32 nameLen = 0;
+ NativeMethods.LookupPrivilegeNameW(null, ref luid, null, ref nameLen);
+
+ StringBuilder name = new StringBuilder((int)(nameLen + 1));
+ if (!NativeMethods.LookupPrivilegeNameW(null, ref luid, name, ref nameLen))
+ throw new Win32Exception("LookupPrivilegeName() failed");
+
+ return name.ToString();
+ }
+
+ private static SafeMemoryBuffer GetTokenInformation(SafeNativeHandle hToken,
+ NativeHelpers.TokenInformationClass infoClass)
+ {
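+ // Standard two-call pattern: first call with a zero-length buffer to learn
+ // the required size, then call again with a buffer of that size.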
+ UInt32 tokenLength;
+ bool res = NativeMethods.GetTokenInformation(hToken, infoClass, new SafeMemoryBuffer(IntPtr.Zero), 0,
+ out tokenLength);
+ int errCode = Marshal.GetLastWin32Error();
+ if (!res && errCode != 24 && errCode != 122) // ERROR_INSUFFICIENT_BUFFER, ERROR_BAD_LENGTH
+ throw new Win32Exception(errCode, String.Format("GetTokenInformation({0}) failed to get buffer length",
+ infoClass.ToString()));
+
+ SafeMemoryBuffer tokenInfo = new SafeMemoryBuffer((int)tokenLength);
+ if (!NativeMethods.GetTokenInformation(hToken, infoClass, tokenInfo, tokenLength, out tokenLength))
+ throw new Win32Exception(String.Format("GetTokenInformation({0}) failed", infoClass.ToString()));
+
+ return tokenInfo;
+ }
+
+ private static void PtrToStructureArray<T>(T[] array, IntPtr ptr)
+ {
+ IntPtr ptrOffset = ptr;
+ for (int i = 0; i < array.Length; i++, ptrOffset = IntPtr.Add(ptrOffset, Marshal.SizeOf(typeof(T))))
+ array[i] = (T)Marshal.PtrToStructure(ptrOffset, typeof(T));
+ }
+
+ private static SafeNativeHandle TryOpenAccessToken(System.Diagnostics.Process process, TokenAccessLevels access)
+ {
+ try
+ {
+ using (SafeNativeHandle hProcess = OpenProcess(process.Id, ProcessAccessFlags.QueryInformation, false))
+ return OpenProcessToken(hProcess, access);
+ }
+ catch (Win32Exception)
+ {
+ return null;
+ }
+ }
+ }
+}
diff --git a/lib/ansible/module_utils/csharp/Ansible.Basic.cs b/lib/ansible/module_utils/csharp/Ansible.Basic.cs
new file mode 100644
index 0000000..c68281e
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/Ansible.Basic.cs
@@ -0,0 +1,1489 @@
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.IO;
+using System.Linq;
+using System.Management.Automation;
+using System.Management.Automation.Runspaces;
+using System.Reflection;
+using System.Runtime.InteropServices;
+using System.Security.AccessControl;
+using System.Security.Principal;
+#if CORECLR
+using Newtonsoft.Json;
+#else
+using System.Web.Script.Serialization;
+#endif
+
+// Newtonsoft.Json may reference a different System.Runtime version (6.x) than loaded by PowerShell 7.3 (7.x).
+// Ignore CS1701 so the code can be compiled when warnings are reported as errors.
+//NoWarn -Name CS1701 -CLR Core
+
+// System.Diagnostics.EventLog.dll references differently versioned DLLs than those
+// loaded in PSCore; ignore CS1702 so this warning is suppressed.
+//NoWarn -Name CS1702 -CLR Core
+
+//AssemblyReference -Type Newtonsoft.Json.JsonConvert -CLR Core
+//AssemblyReference -Type System.Diagnostics.EventLog -CLR Core
+//AssemblyReference -Type System.Security.AccessControl.NativeObjectSecurity -CLR Core
+//AssemblyReference -Type System.Security.AccessControl.DirectorySecurity -CLR Core
+//AssemblyReference -Type System.Security.Principal.IdentityReference -CLR Core
+
+//AssemblyReference -Name System.Web.Extensions.dll -CLR Framework
+
+namespace Ansible.Basic
+{
+ public class AnsibleModule
+ {
+ public delegate void ExitHandler(int rc);
+ public static ExitHandler Exit = new ExitHandler(ExitModule);
+
+ public delegate void WriteLineHandler(string line);
+ public static WriteLineHandler WriteLine = new WriteLineHandler(WriteLineModule);
+
+ public static bool _DebugArgSpec = false;
+
+ private static List<string> BOOLEANS_TRUE = new List<string>() { "y", "yes", "on", "1", "true", "t", "1.0" };
+ private static List<string> BOOLEANS_FALSE = new List<string>() { "n", "no", "off", "0", "false", "f", "0.0" };
+
+ private string remoteTmp = Path.GetTempPath();
+ private string tmpdir = null;
+ private HashSet<string> noLogValues = new HashSet<string>();
+ private List<string> optionsContext = new List<string>();
+ private List<string> warnings = new List<string>();
+ private List<Dictionary<string, string>> deprecations = new List<Dictionary<string, string>>();
+ private List<string> cleanupFiles = new List<string>();
+
+ private Dictionary<string, string> passVars = new Dictionary<string, string>()
+ {
+ // null values mean no mapping and are not used in Ansible.Basic.AnsibleModule
+ { "check_mode", "CheckMode" },
+ { "debug", "DebugMode" },
+ { "diff", "DiffMode" },
+ { "keep_remote_files", "KeepRemoteFiles" },
+ { "module_name", "ModuleName" },
+ { "no_log", "NoLog" },
+ { "remote_tmp", "remoteTmp" },
+ { "selinux_special_fs", null },
+ { "shell_executable", null },
+ { "socket", null },
+ { "string_conversion_action", null },
+ { "syslog_facility", null },
+ { "tmpdir", "tmpdir" },
+ { "verbosity", "Verbosity" },
+ { "version", "AnsibleVersion" },
+ };
+ private List<string> passBools = new List<string>() { "check_mode", "debug", "diff", "keep_remote_files", "no_log" };
+ private List<string> passInts = new List<string>() { "verbosity" };
+ private Dictionary<string, List<object>> specDefaults = new Dictionary<string, List<object>>()
+ {
+ // key - (default, type) - null is freeform
+ { "apply_defaults", new List<object>() { false, typeof(bool) } },
+ { "aliases", new List<object>() { typeof(List<string>), typeof(List<string>) } },
+ { "choices", new List<object>() { typeof(List<object>), typeof(List<object>) } },
+ { "default", new List<object>() { null, null } },
+ { "deprecated_aliases", new List<object>() { typeof(List<Hashtable>), typeof(List<Hashtable>) } },
+ { "elements", new List<object>() { null, null } },
+ { "mutually_exclusive", new List<object>() { typeof(List<List<string>>), typeof(List<object>) } },
+ { "no_log", new List<object>() { false, typeof(bool) } },
+ { "options", new List<object>() { typeof(Hashtable), typeof(Hashtable) } },
+ { "removed_in_version", new List<object>() { null, typeof(string) } },
+ { "removed_at_date", new List<object>() { null, typeof(DateTime) } },
+ { "removed_from_collection", new List<object>() { null, typeof(string) } },
+ { "required", new List<object>() { false, typeof(bool) } },
+ { "required_by", new List<object>() { typeof(Hashtable), typeof(Hashtable) } },
+ { "required_if", new List<object>() { typeof(List<List<object>>), typeof(List<object>) } },
+ { "required_one_of", new List<object>() { typeof(List<List<string>>), typeof(List<object>) } },
+ { "required_together", new List<object>() { typeof(List<List<string>>), typeof(List<object>) } },
+ { "supports_check_mode", new List<object>() { false, typeof(bool) } },
+ { "type", new List<object>() { "str", null } },
+ };
+ private Dictionary<string, Delegate> optionTypes = new Dictionary<string, Delegate>()
+ {
+ { "bool", new Func<object, bool>(ParseBool) },
+ { "dict", new Func<object, Dictionary<string, object>>(ParseDict) },
+ { "float", new Func<object, float>(ParseFloat) },
+ { "int", new Func<object, int>(ParseInt) },
+ { "json", new Func<object, string>(ParseJson) },
+ { "list", new Func<object, List<object>>(ParseList) },
+ { "path", new Func<object, string>(ParsePath) },
+ { "raw", new Func<object, object>(ParseRaw) },
+ { "sid", new Func<object, SecurityIdentifier>(ParseSid) },
+ { "str", new Func<object, string>(ParseStr) },
+ };
+
+ public Dictionary<string, object> Diff = new Dictionary<string, object>();
+ public IDictionary Params = null;
+ public Dictionary<string, object> Result = new Dictionary<string, object>() { { "changed", false } };
+
+ public bool CheckMode { get; private set; }
+ public bool DebugMode { get; private set; }
+ public bool DiffMode { get; private set; }
+ public bool KeepRemoteFiles { get; private set; }
+ public string ModuleName { get; private set; }
+ public bool NoLog { get; private set; }
+ public int Verbosity { get; private set; }
+ public string AnsibleVersion { get; private set; }
+
+ public string Tmpdir
+ {
+ get
+ {
+ if (tmpdir == null)
+ {
+#if WINDOWS
+ SecurityIdentifier user = WindowsIdentity.GetCurrent().User;
+ DirectorySecurity dirSecurity = new DirectorySecurity();
+ dirSecurity.SetOwner(user);
+ dirSecurity.SetAccessRuleProtection(true, false); // disable inheritance rules
+ FileSystemAccessRule ace = new FileSystemAccessRule(user, FileSystemRights.FullControl,
+ InheritanceFlags.ContainerInherit | InheritanceFlags.ObjectInherit,
+ PropagationFlags.None, AccessControlType.Allow);
+ dirSecurity.AddAccessRule(ace);
+
+ string baseDir = Path.GetFullPath(Environment.ExpandEnvironmentVariables(remoteTmp));
+ if (!Directory.Exists(baseDir))
+ {
+ string failedMsg = null;
+ try
+ {
+#if CORECLR
+ DirectoryInfo createdDir = Directory.CreateDirectory(baseDir);
+ FileSystemAclExtensions.SetAccessControl(createdDir, dirSecurity);
+#else
+ Directory.CreateDirectory(baseDir, dirSecurity);
+#endif
+ }
+ catch (Exception e)
+ {
+ failedMsg = String.Format("Failed to create base tmpdir '{0}': {1}", baseDir, e.Message);
+ }
+
+ if (failedMsg != null)
+ {
+ string envTmp = Path.GetTempPath();
+ Warn(String.Format("Unable to use '{0}' as temporary directory, falling back to system tmp '{1}': {2}", baseDir, envTmp, failedMsg));
+ baseDir = envTmp;
+ }
+ else
+ {
+ NTAccount currentUser = (NTAccount)user.Translate(typeof(NTAccount));
+ string warnMsg = String.Format("Module remote_tmp {0} did not exist and was created with FullControl to {1}, ", baseDir, currentUser.ToString());
+ warnMsg += "this may cause issues when running as another user. To avoid this, create the remote_tmp dir with the correct permissions manually";
+ Warn(warnMsg);
+ }
+ }
+
+ string dateTime = DateTime.Now.ToFileTime().ToString();
+ string dirName = String.Format("ansible-moduletmp-{0}-{1}", dateTime, new Random().Next(0, int.MaxValue));
+ string newTmpdir = Path.Combine(baseDir, dirName);
+#if CORECLR
+ DirectoryInfo tmpdirInfo = Directory.CreateDirectory(newTmpdir);
+ FileSystemAclExtensions.SetAccessControl(tmpdirInfo, dirSecurity);
+#else
+ Directory.CreateDirectory(newTmpdir, dirSecurity);
+#endif
+ tmpdir = newTmpdir;
+
+ if (!KeepRemoteFiles)
+ cleanupFiles.Add(tmpdir);
+#else
+ throw new NotImplementedException("Tmpdir is only supported on Windows");
+#endif
+ }
+ return tmpdir;
+ }
+ }
+
+ public AnsibleModule(string[] args, IDictionary argumentSpec, IDictionary[] fragments = null)
+ {
+ // NoLog is not set yet, so we cannot rely on FailJson to sanitize the output
+ // Do the minimum amount to get this running before we actually parse the params
+ Dictionary<string, string> aliases = new Dictionary<string, string>();
+ try
+ {
+ ValidateArgumentSpec(argumentSpec);
+
+ // Merge the fragments if present into the main arg spec.
+ if (fragments != null)
+ {
+ foreach (IDictionary fragment in fragments)
+ {
+ ValidateArgumentSpec(fragment);
+ MergeFragmentSpec(argumentSpec, fragment);
+ }
+ }
+
+ // Used by ansible-test to retrieve the module argument spec, not designed for public use.
+ if (_DebugArgSpec)
+ {
+ // Cannot call exit here because it will be caught with the catch (Exception e) below. Instead
+ // just throw a new exception with a specific message and the exception block will handle it.
+ ScriptBlock.Create("Set-Variable -Name ansibleTestArgSpec -Value $args[0] -Scope Global"
+ ).Invoke(argumentSpec);
+ throw new Exception("ansible-test validate-modules check");
+ }
+
+ // Now make sure all the metadata keys are set to their defaults; this must be done after we've
+ // potentially output the arg spec for ansible-test.
+ SetArgumentSpecDefaults(argumentSpec);
+
+ Params = GetParams(args);
+ aliases = GetAliases(argumentSpec, Params);
+ SetNoLogValues(argumentSpec, Params);
+ }
+ catch (Exception e)
+ {
+ if (e.Message == "ansible-test validate-modules check")
+ Exit(0);
+
+ Dictionary<string, object> result = new Dictionary<string, object>
+ {
+ { "failed", true },
+ { "msg", String.Format("internal error: {0}", e.Message) },
+ { "exception", e.ToString() }
+ };
+ WriteLine(ToJson(result));
+ Exit(1);
+ }
+
+ // Initialise public properties to the defaults before we parse the actual inputs
+ CheckMode = false;
+ DebugMode = false;
+ DiffMode = false;
+ KeepRemoteFiles = false;
+ ModuleName = "undefined win module";
+ NoLog = (bool)argumentSpec["no_log"];
+ Verbosity = 0;
+ AppDomain.CurrentDomain.ProcessExit += CleanupFiles;
+
+ List<string> legalInputs = passVars.Keys.Select(v => "_ansible_" + v).ToList();
+ legalInputs.AddRange(((IDictionary)argumentSpec["options"]).Keys.Cast<string>().ToList());
+ legalInputs.AddRange(aliases.Keys.Cast<string>().ToList());
+ CheckArguments(argumentSpec, Params, legalInputs);
+
+ // Set a Ansible friendly invocation value in the result object
+ Dictionary<string, object> invocation = new Dictionary<string, object>() { { "module_args", Params } };
+ Result["invocation"] = RemoveNoLogValues(invocation, noLogValues);
+
+ if (!NoLog)
+ LogEvent(String.Format("Invoked with:\r\n {0}", FormatLogData(Params, 2)), sanitise: false);
+ }
+
+ public static AnsibleModule Create(string[] args, IDictionary argumentSpec, IDictionary[] fragments = null)
+ {
+ return new AnsibleModule(args, argumentSpec, fragments);
+ }
+
+ public void Debug(string message)
+ {
+ if (DebugMode)
+ LogEvent(String.Format("[DEBUG] {0}", message));
+ }
+
+ public void Deprecate(string message, string version)
+ {
+ Deprecate(message, version, null);
+ }
+
+ public void Deprecate(string message, string version, string collectionName)
+ {
+ deprecations.Add(new Dictionary<string, string>() {
+ { "msg", message }, { "version", version }, { "collection_name", collectionName } });
+ LogEvent(String.Format("[DEPRECATION WARNING] {0} {1}", message, version));
+ }
+
+ public void Deprecate(string message, DateTime date)
+ {
+ Deprecate(message, date, null);
+ }
+
+ public void Deprecate(string message, DateTime date, string collectionName)
+ {
+ string isoDate = date.ToString("yyyy-MM-dd");
+ deprecations.Add(new Dictionary<string, string>() {
+ { "msg", message }, { "date", isoDate }, { "collection_name", collectionName } });
+ LogEvent(String.Format("[DEPRECATION WARNING] {0} {1}", message, isoDate));
+ }
+
+ public void ExitJson()
+ {
+ WriteLine(GetFormattedResults(Result));
+ CleanupFiles(null, null);
+ Exit(0);
+ }
+
+ public void FailJson(string message) { FailJson(message, null, null); }
+ public void FailJson(string message, ErrorRecord psErrorRecord) { FailJson(message, psErrorRecord, null); }
+ public void FailJson(string message, Exception exception) { FailJson(message, null, exception); }
+ private void FailJson(string message, ErrorRecord psErrorRecord, Exception exception)
+ {
+ Result["failed"] = true;
+ Result["msg"] = RemoveNoLogValues(message, noLogValues);
+
+ if (!Result.ContainsKey("exception") && (Verbosity > 2 || DebugMode))
+ {
+ if (psErrorRecord != null)
+ {
+ string traceback = String.Format("{0}\r\n{1}", psErrorRecord.ToString(), psErrorRecord.InvocationInfo.PositionMessage);
+ traceback += String.Format("\r\n + CategoryInfo : {0}", psErrorRecord.CategoryInfo.ToString());
+ traceback += String.Format("\r\n + FullyQualifiedErrorId : {0}", psErrorRecord.FullyQualifiedErrorId.ToString());
+ traceback += String.Format("\r\n\r\nScriptStackTrace:\r\n{0}", psErrorRecord.ScriptStackTrace);
+ Result["exception"] = traceback;
+ }
+ else if (exception != null)
+ Result["exception"] = exception.ToString();
+ }
+
+ WriteLine(GetFormattedResults(Result));
+ CleanupFiles(null, null);
+ Exit(1);
+ }
+
+ public void LogEvent(string message, EventLogEntryType logEntryType = EventLogEntryType.Information, bool sanitise = true)
+ {
+ if (NoLog)
+ return;
+
+#if WINDOWS
+ string logSource = "Ansible";
+ bool logSourceExists = false;
+ try
+ {
+ logSourceExists = EventLog.SourceExists(logSource);
+ }
+ catch (System.Security.SecurityException) { } // non admin users may not have permission
+
+ if (!logSourceExists)
+ {
+ try
+ {
+ EventLog.CreateEventSource(logSource, "Application");
+ }
+ catch (System.Security.SecurityException)
+ {
+ // Cannot call Warn as that calls LogEvent and we get stuck in a loop
+ warnings.Add(String.Format("Access error when creating EventLog source {0}, logging to the Application source instead", logSource));
+ logSource = "Application";
+ }
+ }
+ if (sanitise)
+ message = (string)RemoveNoLogValues(message, noLogValues);
+ message = String.Format("{0} - {1}", ModuleName, message);
+
+ using (EventLog eventLog = new EventLog("Application"))
+ {
+ eventLog.Source = logSource;
+ try
+ {
+ eventLog.WriteEntry(message, logEntryType, 0);
+ }
+ catch (System.InvalidOperationException) { } // Ignore permission errors on the Application event log
+ catch (System.Exception e)
+ {
+ // Cannot call Warn as that calls LogEvent and we get stuck in a loop
+ warnings.Add(String.Format("Unknown error when creating event log entry: {0}", e.Message));
+ }
+ }
+#else
+ // Windows Event Log is only available on Windows
+ return;
+#endif
+ }
+
+ public void Warn(string message)
+ {
+ warnings.Add(message);
+ LogEvent(String.Format("[WARNING] {0}", message), EventLogEntryType.Warning);
+ }
+
+ public static object FromJson(string json) { return FromJson<object>(json); }
+ public static T FromJson<T>(string json)
+ {
+#if CORECLR
+ return JsonConvert.DeserializeObject<T>(json);
+#else
+ JavaScriptSerializer jss = new JavaScriptSerializer();
+ jss.MaxJsonLength = int.MaxValue;
+ jss.RecursionLimit = int.MaxValue;
+ return jss.Deserialize<T>(json);
+#endif
+ }
+
+ public static string ToJson(object obj)
+ {
+ // Using PowerShell to serialize the JSON is preferable over the native .NET libraries as it handles
+ // PS Objects a lot better than the alternatives. In case we are debugging in Visual Studio we have a
+ // fallback to the other libraries as we won't be dealing with PowerShell objects there.
+ if (Runspace.DefaultRunspace != null)
+ {
+ PSObject rawOut = ScriptBlock.Create("ConvertTo-Json -InputObject $args[0] -Depth 99 -Compress").Invoke(obj)[0];
+ return rawOut.BaseObject as string;
+ }
+ else
+ {
+#if CORECLR
+ return JsonConvert.SerializeObject(obj);
+#else
+ JavaScriptSerializer jss = new JavaScriptSerializer();
+ jss.MaxJsonLength = int.MaxValue;
+ jss.RecursionLimit = int.MaxValue;
+ return jss.Serialize(obj);
+#endif
+ }
+ }
+
+ public static IDictionary GetParams(string[] args)
+ {
+ if (args.Length > 0)
+ {
+ string inputJson = File.ReadAllText(args[0]);
+ Dictionary<string, object> rawParams = FromJson<Dictionary<string, object>>(inputJson);
+ if (!rawParams.ContainsKey("ANSIBLE_MODULE_ARGS"))
+ throw new ArgumentException("Module was unable to get ANSIBLE_MODULE_ARGS value from the argument path json");
+ return (IDictionary)rawParams["ANSIBLE_MODULE_ARGS"];
+ }
+ else
+ {
+ // $complex_args is already a Hashtable, no need to waste time converting to a dictionary
+ PSObject rawArgs = ScriptBlock.Create("$complex_args").Invoke()[0];
+ return rawArgs.BaseObject as Hashtable;
+ }
+ }
+
+ public static bool ParseBool(object value)
+ {
+ if (value.GetType() == typeof(bool))
+ return (bool)value;
+
+ List<string> booleans = new List<string>();
+ booleans.AddRange(BOOLEANS_TRUE);
+ booleans.AddRange(BOOLEANS_FALSE);
+
+ string stringValue = ParseStr(value).ToLowerInvariant().Trim();
+ if (BOOLEANS_TRUE.Contains(stringValue))
+ return true;
+ else if (BOOLEANS_FALSE.Contains(stringValue))
+ return false;
+
+ string msg = String.Format("The value '{0}' is not a valid boolean. Valid booleans include: {1}",
+ stringValue, String.Join(", ", booleans));
+ throw new ArgumentException(msg);
+ }
+
+ public static Dictionary<string, object> ParseDict(object value)
+ {
+ Type valueType = value.GetType();
+ if (valueType == typeof(Dictionary<string, object>))
+ return (Dictionary<string, object>)value;
+ else if (value is IDictionary)
+ return ((IDictionary)value).Cast<DictionaryEntry>().ToDictionary(kvp => (string)kvp.Key, kvp => kvp.Value);
+ else if (valueType == typeof(string))
+ {
+ string stringValue = (string)value;
+ if (stringValue.StartsWith("{") && stringValue.EndsWith("}"))
+ return FromJson<Dictionary<string, object>>((string)value);
+ else if (stringValue.IndexOfAny(new char[1] { '=' }) != -1)
+ {
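+ // e.g. "a=1, b='c d'" parses to a dict of { a: "1", b: "c d" } (illustrative)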
+ List<string> fields = new List<string>();
+ List<char> fieldBuffer = new List<char>();
+ char? inQuote = null;
+ bool inEscape = false;
+ string field;
+
+ foreach (char c in stringValue.ToCharArray())
+ {
+ if (inEscape)
+ {
+ fieldBuffer.Add(c);
+ inEscape = false;
+ }
+ else if (c == '\\')
+ inEscape = true;
+ else if (inQuote == null && (c == '\'' || c == '"'))
+ inQuote = c;
+ else if (inQuote != null && c == inQuote)
+ inQuote = null;
+ else if (inQuote == null && (c == ',' || c == ' '))
+ {
+ field = String.Join("", fieldBuffer);
+ if (field != "")
+ fields.Add(field);
+ fieldBuffer = new List<char>();
+ }
+ else
+ fieldBuffer.Add(c);
+ }
+
+ field = String.Join("", fieldBuffer);
+ if (field != "")
+ fields.Add(field);
+
+ return fields.Distinct().Select(i => i.Split(new[] { '=' }, 2)).ToDictionary(i => i[0], i => i.Length > 1 ? (object)i[1] : null);
+ }
+ else
+ throw new ArgumentException("string cannot be converted to a dict, must either be a JSON string or in the key=value form");
+ }
+
+ throw new ArgumentException(String.Format("{0} cannot be converted to a dict", valueType.FullName));
+ }
+
+ public static float ParseFloat(object value)
+ {
+ if (value.GetType() == typeof(float))
+ return (float)value;
+
+ string valueStr = ParseStr(value);
+ return float.Parse(valueStr);
+ }
+
+ public static int ParseInt(object value)
+ {
+ Type valueType = value.GetType();
+ if (valueType == typeof(int))
+ return (int)value;
+ else
+ return Int32.Parse(ParseStr(value));
+ }
+
+ public static string ParseJson(object value)
+ {
+ // mostly used to ensure a dict is a JSON string, as it may
+ // have been converted on the controller side
+ Type valueType = value.GetType();
+ if (value is IDictionary)
+ return ToJson(value);
+ else if (valueType == typeof(string))
+ return (string)value;
+ else
+ throw new ArgumentException(String.Format("{0} cannot be converted to json", valueType.FullName));
+ }
+
+ public static List<object> ParseList(object value)
+ {
+ if (value == null)
+ return null;
+
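+ // e.g. the string "a, b" is split on commas into ["a", "b"], while an int
+ // is wrapped in a single-element list (illustrative)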
+ Type valueType = value.GetType();
+ if (valueType.IsGenericType && valueType.GetGenericTypeDefinition() == typeof(List<>))
+ return (List<object>)value;
+ else if (valueType == typeof(ArrayList))
+ return ((ArrayList)value).Cast<object>().ToList();
+ else if (valueType.IsArray)
+ return ((object[])value).ToList();
+ else if (valueType == typeof(string))
+ return ((string)value).Split(',').Select(s => s.Trim()).ToList<object>();
+ else if (valueType == typeof(int))
+ return new List<object>() { value };
+ else
+ throw new ArgumentException(String.Format("{0} cannot be converted to a list", valueType.FullName));
+ }
+
+ public static string ParsePath(object value)
+ {
+ string stringValue = ParseStr(value);
+
+ // do not validate or expand the env vars if it starts with \\?\ as
+ // it is a special path designed for the NT kernel to interpret
+ if (stringValue.StartsWith(@"\\?\"))
+ return stringValue;
+
+ stringValue = Environment.ExpandEnvironmentVariables(stringValue);
+ if (stringValue.IndexOfAny(Path.GetInvalidPathChars()) != -1)
+ throw new ArgumentException("string value contains invalid path characters, cannot convert to path");
+
+ // Path.GetFullPath throws an exception if it contains any invalid chars
+ Path.GetFullPath(stringValue);
+ return stringValue;
+ }
+
+ public static object ParseRaw(object value) { return value; }
+
+ public static SecurityIdentifier ParseSid(object value)
+ {
+ string stringValue = ParseStr(value);
+
+ try
+ {
+ return new SecurityIdentifier(stringValue);
+ }
+ catch (ArgumentException) { } // ignore failures; the string may not have been a SID
+
+ NTAccount account = new NTAccount(stringValue);
+ return (SecurityIdentifier)account.Translate(typeof(SecurityIdentifier));
+ }
+
+ public static string ParseStr(object value) { return value.ToString(); }
+
+ private void ValidateArgumentSpec(IDictionary argumentSpec)
+ {
+ Dictionary<string, object> changedValues = new Dictionary<string, object>();
+ foreach (DictionaryEntry entry in argumentSpec)
+ {
+ string key = (string)entry.Key;
+
+ // validate the key is a valid argument spec key
+ if (!specDefaults.ContainsKey(key))
+ {
+ string msg = String.Format("argument spec entry contains an invalid key '{0}', valid keys: {1}",
+ key, String.Join(", ", specDefaults.Keys));
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+
+ // ensure the value is casted to the type we expect
+ Type optionType = null;
+ if (entry.Value != null)
+ optionType = (Type)specDefaults[key][1];
+ if (optionType != null)
+ {
+ Type actualType = entry.Value.GetType();
+ bool invalid = false;
+ if (optionType.IsGenericType && optionType.GetGenericTypeDefinition() == typeof(List<>))
+ {
+ // verify the actual type is not just a single value of the list type
+ Type entryType = optionType.GetGenericArguments()[0];
+ object[] arrayElementTypes = new object[]
+ {
+ null, // ArrayList does not have an ElementType
+ entryType,
+ typeof(object), // Hope the object is actually entryType or it can at least be cast.
+ };
+
+ bool isArray = entry.Value is IList && arrayElementTypes.Contains(actualType.GetElementType());
+ if (actualType == entryType || isArray)
+ {
+ object rawArray;
+ if (isArray)
+ rawArray = entry.Value;
+ else
+ rawArray = new object[1] { entry.Value };
+
+ MethodInfo castMethod = typeof(Enumerable).GetMethod("Cast").MakeGenericMethod(entryType);
+ MethodInfo toListMethod = typeof(Enumerable).GetMethod("ToList").MakeGenericMethod(entryType);
+
+ var enumerable = castMethod.Invoke(null, new object[1] { rawArray });
+ var newList = toListMethod.Invoke(null, new object[1] { enumerable });
+ changedValues.Add(key, newList);
+ }
+ else if (actualType != optionType && !(actualType == typeof(List<object>)))
+ invalid = true;
+ }
+ else
+ invalid = actualType != optionType;
+
+ if (invalid)
+ {
+ string msg = String.Format("argument spec for '{0}' did not match expected type {1}: actual type {2}",
+ key, optionType.FullName, actualType.FullName);
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ }
+
+ // recursively validate the spec
+ if (key == "options" && entry.Value != null)
+ {
+ IDictionary optionsSpec = (IDictionary)entry.Value;
+ foreach (DictionaryEntry optionEntry in optionsSpec)
+ {
+ optionsContext.Add((string)optionEntry.Key);
+ IDictionary optionMeta = (IDictionary)optionEntry.Value;
+ ValidateArgumentSpec(optionMeta);
+ optionsContext.RemoveAt(optionsContext.Count - 1);
+ }
+ }
+
+ // validate the type and elements key type values are known types
+ if (key == "type" || key == "elements" && entry.Value != null)
+ {
+ Type valueType = entry.Value.GetType();
+ if (valueType == typeof(string))
+ {
+ string typeValue = (string)entry.Value;
+ if (!optionTypes.ContainsKey(typeValue))
+ {
+ string msg = String.Format("{0} '{1}' is unsupported", key, typeValue);
+ msg = String.Format("{0}. Valid types are: {1}", FormatOptionsContext(msg, " - "), String.Join(", ", optionTypes.Keys));
+ throw new ArgumentException(msg);
+ }
+ }
+ else if (!(entry.Value is Delegate))
+ {
+ string msg = String.Format("{0} must either be a string or delegate, was: {1}", key, valueType.FullName);
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ }
+ }
+
+ // Outside of the spec iterator, change the values that were casted above
+ foreach (KeyValuePair<string, object> changedValue in changedValues)
+ argumentSpec[changedValue.Key] = changedValue.Value;
+ }
+
+ private void MergeFragmentSpec(IDictionary argumentSpec, IDictionary fragment)
+ {
+ foreach (DictionaryEntry fragmentEntry in fragment)
+ {
+ string fragmentKey = fragmentEntry.Key.ToString();
+
+ if (argumentSpec.Contains(fragmentKey))
+ {
+ // We only want to add new list entries and merge dictionary new keys and values. Leave the other
+ // values as is in the argument spec as that takes priority over the fragment.
+ if (fragmentEntry.Value is IDictionary)
+ {
+ MergeFragmentSpec((IDictionary)argumentSpec[fragmentKey], (IDictionary)fragmentEntry.Value);
+ }
+ else if (fragmentEntry.Value is IList)
+ {
+ IList specValue = (IList)argumentSpec[fragmentKey];
+ foreach (object fragmentValue in (IList)fragmentEntry.Value)
+ specValue.Add(fragmentValue);
+ }
+ }
+ else
+ argumentSpec[fragmentKey] = fragmentEntry.Value;
+ }
+ }
+
+ private void SetArgumentSpecDefaults(IDictionary argumentSpec)
+ {
+ foreach (KeyValuePair<string, List<object>> metadataEntry in specDefaults)
+ {
+ List<object> defaults = metadataEntry.Value;
+ object defaultValue = defaults[0];
+ if (defaultValue != null && defaultValue.GetType() == typeof(Type).GetType())
+ defaultValue = Activator.CreateInstance((Type)defaultValue);
+
+ if (!argumentSpec.Contains(metadataEntry.Key))
+ argumentSpec[metadataEntry.Key] = defaultValue;
+ }
+
+ // Recursively set the defaults for any inner options.
+ foreach (DictionaryEntry entry in argumentSpec)
+ {
+ if (entry.Value == null || entry.Key.ToString() != "options")
+ continue;
+
+ IDictionary optionsSpec = (IDictionary)entry.Value;
+ foreach (DictionaryEntry optionEntry in optionsSpec)
+ {
+ optionsContext.Add((string)optionEntry.Key);
+ IDictionary optionMeta = (IDictionary)optionEntry.Value;
+ SetArgumentSpecDefaults(optionMeta);
+ optionsContext.RemoveAt(optionsContext.Count - 1);
+ }
+ }
+ }
+
+ private Dictionary<string, string> GetAliases(IDictionary argumentSpec, IDictionary parameters)
+ {
+ Dictionary<string, string> aliasResults = new Dictionary<string, string>();
+
+ foreach (DictionaryEntry entry in (IDictionary)argumentSpec["options"])
+ {
+ string k = (string)entry.Key;
+ Hashtable v = (Hashtable)entry.Value;
+
+ List<string> aliases = (List<string>)v["aliases"];
+ object defaultValue = v["default"];
+ bool required = (bool)v["required"];
+
+ if (defaultValue != null && required)
+ throw new ArgumentException(String.Format("required and default are mutually exclusive for {0}", k));
+
+ foreach (string alias in aliases)
+ {
+ aliasResults.Add(alias, k);
+ if (parameters.Contains(alias))
+ parameters[k] = parameters[alias];
+ }
+
+ List<Hashtable> deprecatedAliases = (List<Hashtable>)v["deprecated_aliases"];
+ foreach (Hashtable depInfo in deprecatedAliases)
+ {
+ foreach (string keyName in new List<string> { "name" })
+ {
+ if (!depInfo.ContainsKey(keyName))
+ {
+ string msg = String.Format("{0} is required in a deprecated_aliases entry", keyName);
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ }
+ if (!depInfo.ContainsKey("version") && !depInfo.ContainsKey("date"))
+ {
+ string msg = "One of version or date is required in a deprecated_aliases entry";
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ if (depInfo.ContainsKey("version") && depInfo.ContainsKey("date"))
+ {
+ string msg = "Only one of version or date is allowed in a deprecated_aliases entry";
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ if (depInfo.ContainsKey("date") && depInfo["date"].GetType() != typeof(DateTime))
+ {
+ string msg = "A deprecated_aliases date must be a DateTime object";
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ string collectionName = null;
+ if (depInfo.ContainsKey("collection_name"))
+ {
+ collectionName = (string)depInfo["collection_name"];
+ }
+ string aliasName = (string)depInfo["name"];
+
+ if (parameters.Contains(aliasName))
+ {
+ string msg = String.Format("Alias '{0}' is deprecated. See the module docs for more information", aliasName);
+ if (depInfo.ContainsKey("version"))
+ {
+ string depVersion = (string)depInfo["version"];
+ Deprecate(FormatOptionsContext(msg, " - "), depVersion, collectionName);
+ }
+ if (depInfo.ContainsKey("date"))
+ {
+ DateTime depDate = (DateTime)depInfo["date"];
+ Deprecate(FormatOptionsContext(msg, " - "), depDate, collectionName);
+ }
+ }
+ }
+ }
+
+ return aliasResults;
+ }
+
+ private void SetNoLogValues(IDictionary argumentSpec, IDictionary parameters)
+ {
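+ // Records the values of any no_log options so they can be masked later, and emits deprecation
+ // messages for options that are flagged for removal by version or date.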
+ foreach (DictionaryEntry entry in (IDictionary)argumentSpec["options"])
+ {
+ string k = (string)entry.Key;
+ Hashtable v = (Hashtable)entry.Value;
+
+ if ((bool)v["no_log"])
+ {
+ object noLogObject = parameters.Contains(k) ? parameters[k] : null;
+ string noLogString = noLogObject == null ? "" : noLogObject.ToString();
+ if (!String.IsNullOrEmpty(noLogString))
+ noLogValues.Add(noLogString);
+ }
+ string collectionName = null;
+ if (v.ContainsKey("removed_from_collection"))
+ {
+ collectionName = (string)v["removed_from_collection"];
+ }
+
+ object removedInVersion = v["removed_in_version"];
+ if (removedInVersion != null && parameters.Contains(k))
+ Deprecate(String.Format("Param '{0}' is deprecated. See the module docs for more information", k),
+ removedInVersion.ToString(), collectionName);
+
+ object removedAtDate = v["removed_at_date"];
+ if (removedAtDate != null && parameters.Contains(k))
+ Deprecate(String.Format("Param '{0}' is deprecated. See the module docs for more information", k),
+ (DateTime)removedAtDate, collectionName);
+ }
+ }
+
+ private void CheckArguments(IDictionary spec, IDictionary param, List<string> legalInputs)
+ {
+ // initially parse the params and check for unsupported ones and set internal vars
+ CheckUnsupportedArguments(param, legalInputs);
+
+ // Only run this check if we are at the root argument (optionsContext.Count == 0)
+ if (CheckMode && !(bool)spec["supports_check_mode"] && optionsContext.Count == 0)
+ {
+ Result["skipped"] = true;
+ Result["msg"] = String.Format("remote module ({0}) does not support check mode", ModuleName);
+ ExitJson();
+ }
+ IDictionary optionSpec = (IDictionary)spec["options"];
+
+ CheckMutuallyExclusive(param, (IList)spec["mutually_exclusive"]);
+ CheckRequiredArguments(optionSpec, param);
+
+ // set the parameter types based on the type spec value
+ foreach (DictionaryEntry entry in optionSpec)
+ {
+ string k = (string)entry.Key;
+ Hashtable v = (Hashtable)entry.Value;
+
+ object value = param.Contains(k) ? param[k] : null;
+ if (value != null)
+ {
+ // convert the current value to the wanted type
+ Delegate typeConverter;
+ string type;
+ if (v["type"].GetType() == typeof(string))
+ {
+ type = (string)v["type"];
+ typeConverter = optionTypes[type];
+ }
+ else
+ {
+ type = "delegate";
+ typeConverter = (Delegate)v["type"];
+ }
+
+ try
+ {
+ value = typeConverter.DynamicInvoke(value);
+ param[k] = value;
+ }
+ catch (Exception e)
+ {
+ string msg = String.Format("argument for {0} is of type {1} and we were unable to convert to {2}: {3}",
+ k, value.GetType(), type, e.InnerException.Message);
+ FailJson(FormatOptionsContext(msg));
+ }
+
+ // ensure it matches the choices if there are choices set
+ List<string> choices = ((List<object>)v["choices"]).Select(x => x.ToString()).Cast<string>().ToList();
+ if (choices.Count > 0)
+ {
+ List<string> values;
+ string choiceMsg;
+ if (type == "list")
+ {
+ values = ((List<object>)value).Select(x => x.ToString()).Cast<string>().ToList();
+ choiceMsg = "one or more of";
+ }
+ else
+ {
+ values = new List<string>() { value.ToString() };
+ choiceMsg = "one of";
+ }
+
+ List<string> diffList = values.Except(choices, StringComparer.OrdinalIgnoreCase).ToList();
+ List<string> caseDiffList = values.Except(choices).ToList();
+ if (diffList.Count > 0)
+ {
+ string msg = String.Format("value of {0} must be {1}: {2}. Got no match for: {3}",
+ k, choiceMsg, String.Join(", ", choices), String.Join(", ", diffList));
+ FailJson(FormatOptionsContext(msg));
+ }
+ /*
+ For now we will just silently accept case insensitive choices; uncomment this if we want to add it back in
+ else if (caseDiffList.Count > 0)
+ {
+ // For backwards compatibility with Legacy.psm1 we need to be matching choices that are not case sensitive.
+ // We will warn the user it was case insensitive and tell them this will become case sensitive in the future.
+ string msg = String.Format(
+ "value of {0} was a case insensitive match of {1}: {2}. Checking of choices will be case sensitive in a future Ansible release. Case insensitive matches were: {3}",
+ k, choiceMsg, String.Join(", ", choices), String.Join(", ", caseDiffList.Select(x => RemoveNoLogValues(x, noLogValues)))
+ );
+ Warn(FormatOptionsContext(msg));
+ }*/
+ }
+ }
+ }
+
+ CheckRequiredTogether(param, (IList)spec["required_together"]);
+ CheckRequiredOneOf(param, (IList)spec["required_one_of"]);
+ CheckRequiredIf(param, (IList)spec["required_if"]);
+ CheckRequiredBy(param, (IDictionary)spec["required_by"]);
+
+ // finally ensure all missing parameters are set to null and handle sub options
+ foreach (DictionaryEntry entry in optionSpec)
+ {
+ string k = (string)entry.Key;
+ IDictionary v = (IDictionary)entry.Value;
+
+ if (!param.Contains(k))
+ param[k] = null;
+
+ CheckSubOption(param, k, v);
+ }
+ }
+
+ private void CheckUnsupportedArguments(IDictionary param, List<string> legalInputs)
+ {
+ HashSet<string> unsupportedParameters = new HashSet<string>();
+ HashSet<string> caseUnsupportedParameters = new HashSet<string>();
+ List<string> removedParameters = new List<string>();
+
+ foreach (DictionaryEntry entry in param)
+ {
+ string paramKey = (string)entry.Key;
+ if (!legalInputs.Contains(paramKey, StringComparer.OrdinalIgnoreCase))
+ unsupportedParameters.Add(paramKey);
+ else if (!legalInputs.Contains(paramKey))
+ // For backwards compatibility we do not care about the case, but we need to warn the users as this will
+ // change in a future Ansible release.
+ caseUnsupportedParameters.Add(paramKey);
+ else if (paramKey.StartsWith("_ansible_"))
+ {
+ removedParameters.Add(paramKey);
+ string key = paramKey.Replace("_ansible_", "");
+ // skip setting NoLog if NoLog is already set to true (set by the module)
+ // or there's no mapping for this key
+ if ((key == "no_log" && NoLog == true) || (passVars[key] == null))
+ continue;
+
+ object value = entry.Value;
+ if (passBools.Contains(key))
+ value = ParseBool(value);
+ else if (passInts.Contains(key))
+ value = ParseInt(value);
+
+ string propertyName = passVars[key];
+ PropertyInfo property = typeof(AnsibleModule).GetProperty(propertyName);
+ FieldInfo field = typeof(AnsibleModule).GetField(propertyName, BindingFlags.NonPublic | BindingFlags.Instance);
+ if (property != null)
+ property.SetValue(this, value, null);
+ else if (field != null)
+ field.SetValue(this, value);
+ else
+ FailJson(String.Format("implementation error: unknown AnsibleModule property {0}", propertyName));
+ }
+ }
+ foreach (string parameter in removedParameters)
+ param.Remove(parameter);
+
+ if (unsupportedParameters.Count > 0)
+ {
+ legalInputs.RemoveAll(x => passVars.Keys.Contains(x.Replace("_ansible_", "")));
+ string msg = String.Format("Unsupported parameters for ({0}) module: {1}", ModuleName, String.Join(", ", unsupportedParameters));
+ msg = String.Format("{0}. Supported parameters include: {1}", FormatOptionsContext(msg), String.Join(", ", legalInputs));
+ FailJson(msg);
+ }
+
+ /*
+ // Uncomment when we want to start warning users about options that are not a case sensitive match to the spec
+ if (caseUnsupportedParameters.Count > 0)
+ {
+ legalInputs.RemoveAll(x => passVars.Keys.Contains(x.Replace("_ansible_", "")));
+ string msg = String.Format("Parameters for ({0}) was a case insensitive match: {1}", ModuleName, String.Join(", ", caseUnsupportedParameters));
+ msg = String.Format("{0}. Module options will become case sensitive in a future Ansible release. Supported parameters include: {1}",
+ FormatOptionsContext(msg), String.Join(", ", legalInputs));
+ Warn(msg);
+ }*/
+
+ // Make sure we convert all the incorrect case params to the ones set by the module spec
+ foreach (string key in caseUnsupportedParameters)
+ {
+ string correctKey = legalInputs[legalInputs.FindIndex(s => s.Equals(key, StringComparison.OrdinalIgnoreCase))];
+ object value = param[key];
+ param.Remove(key);
+ param.Add(correctKey, value);
+ }
+ }
+
+ private void CheckMutuallyExclusive(IDictionary param, IList mutuallyExclusive)
+ {
+ if (mutuallyExclusive == null)
+ return;
+
+ foreach (object check in mutuallyExclusive)
+ {
+ List<string> mutualCheck = ((IList)check).Cast<string>().ToList();
+ int count = 0;
+ foreach (string entry in mutualCheck)
+ if (param.Contains(entry))
+ count++;
+
+ if (count > 1)
+ {
+ string msg = String.Format("parameters are mutually exclusive: {0}", String.Join(", ", mutualCheck));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
+
+ private void CheckRequiredArguments(IDictionary spec, IDictionary param)
+ {
+ List<string> missing = new List<string>();
+ foreach (DictionaryEntry entry in spec)
+ {
+ string k = (string)entry.Key;
+ Hashtable v = (Hashtable)entry.Value;
+
+ // set defaults for values not already set
+ object defaultValue = v["default"];
+ if (defaultValue != null && !param.Contains(k))
+ param[k] = defaultValue;
+
+ // check required arguments
+ bool required = (bool)v["required"];
+ if (required && !param.Contains(k))
+ missing.Add(k);
+ }
+ if (missing.Count > 0)
+ {
+ string msg = String.Format("missing required arguments: {0}", String.Join(", ", missing));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+
+ private void CheckRequiredTogether(IDictionary param, IList requiredTogether)
+ {
+ if (requiredTogether == null)
+ return;
+
+ foreach (object check in requiredTogether)
+ {
+ List<string> requiredCheck = ((IList)check).Cast<string>().ToList();
+ List<bool> found = new List<bool>();
+ foreach (string field in requiredCheck)
+ if (param.Contains(field))
+ found.Add(true);
+ else
+ found.Add(false);
+
+ if (found.Contains(true) && found.Contains(false))
+ {
+ string msg = String.Format("parameters are required together: {0}", String.Join(", ", requiredCheck));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
+
+ private void CheckRequiredOneOf(IDictionary param, IList requiredOneOf)
+ {
+ if (requiredOneOf == null)
+ return;
+
+ foreach (object check in requiredOneOf)
+ {
+ List<string> requiredCheck = ((IList)check).Cast<string>().ToList();
+ int count = 0;
+ foreach (string field in requiredCheck)
+ if (param.Contains(field))
+ count++;
+
+ if (count == 0)
+ {
+ string msg = String.Format("one of the following is required: {0}", String.Join(", ", requiredCheck));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
+
+ private void CheckRequiredIf(IDictionary param, IList requiredIf)
+ {
+ if (requiredIf == null)
+ return;
+
+ foreach (object check in requiredIf)
+ {
+ IList requiredCheck = (IList)check;
+ List<string> missing = new List<string>();
+ List<string> missingFields = new List<string>();
+ int maxMissingCount = 1;
+ bool oneRequired = false;
+
+ if (requiredCheck.Count < 3 || requiredCheck.Count > 4)
+ FailJson(String.Format("internal error: invalid required_if value count of {0}, expecting 3 or 4 entries", requiredCheck.Count));
+ else if (requiredCheck.Count == 4)
+ oneRequired = (bool)requiredCheck[3];
+
+ string key = (string)requiredCheck[0];
+ object val = requiredCheck[1];
+ IList requirements = (IList)requiredCheck[2];
+
+ if (ParseStr(param[key]) != ParseStr(val))
+ continue;
+
+ string term = "all";
+ if (oneRequired)
+ {
+ maxMissingCount = requirements.Count;
+ term = "any";
+ }
+
+ foreach (string required in requirements.Cast<string>())
+ if (!param.Contains(required))
+ missing.Add(required);
+
+ if (missing.Count >= maxMissingCount)
+ {
+ string msg = String.Format("{0} is {1} but {2} of the following are missing: {3}",
+ key, val.ToString(), term, String.Join(", ", missing));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
+
+ private void CheckRequiredBy(IDictionary param, IDictionary requiredBy)
+ {
+ foreach (DictionaryEntry entry in requiredBy)
+ {
+ string key = (string)entry.Key;
+ if (!param.Contains(key))
+ continue;
+
+ List<string> missing = new List<string>();
+ List<string> requires = ParseList(entry.Value).Cast<string>().ToList();
+ foreach (string required in requires)
+ if (!param.Contains(required))
+ missing.Add(required);
+
+ if (missing.Count > 0)
+ {
+ string msg = String.Format("missing parameter(s) required by '{0}': {1}", key, String.Join(", ", missing));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
+
+ private void CheckSubOption(IDictionary param, string key, IDictionary spec)
+ {
+ object value = param[key];
+
+ string type;
+ if (spec["type"].GetType() == typeof(string))
+ type = (string)spec["type"];
+ else
+ type = "delegate";
+
+ string elements = null;
+ Delegate typeConverter = null;
+ if (spec["elements"] != null && spec["elements"].GetType() == typeof(string))
+ {
+ elements = (string)spec["elements"];
+ typeConverter = optionTypes[elements];
+ }
+ else if (spec["elements"] != null)
+ {
+ elements = "delegate";
+ typeConverter = (Delegate)spec["elements"];
+ }
+
+ if (!(type == "dict" || (type == "list" && elements != null)))
+ // neither a dict nor a list with an element type set, so there is nothing more to check
+ return;
+ else if (type == "list")
+ {
+ // cast each list element to the type specified
+ if (value == null)
+ return;
+
+ List<object> newValue = new List<object>();
+ foreach (object element in (List<object>)value)
+ {
+ if (elements == "dict")
+ newValue.Add(ParseSubSpec(spec, element, key));
+ else
+ {
+ try
+ {
+ object newElement = typeConverter.DynamicInvoke(element);
+ newValue.Add(newElement);
+ }
+ catch (Exception e)
+ {
+ string msg = String.Format("argument for list entry {0} is of type {1} and we were unable to convert to {2}: {3}",
+ key, element.GetType(), elements, e.Message);
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
+
+ param[key] = newValue;
+ }
+ else
+ param[key] = ParseSubSpec(spec, value, key);
+ }
+
+ private object ParseSubSpec(IDictionary spec, object value, string context)
+ {
+ bool applyDefaults = (bool)spec["apply_defaults"];
+
+ // set entry to an empty dict if apply_defaults is set
+ IDictionary optionsSpec = (IDictionary)spec["options"];
+ if (applyDefaults && optionsSpec.Keys.Count > 0 && value == null)
+ value = new Dictionary<string, object>();
+ else if (optionsSpec.Keys.Count == 0 || value == null)
+ return value;
+
+ optionsContext.Add(context);
+ Dictionary<string, object> newValue = (Dictionary<string, object>)ParseDict(value);
+ Dictionary<string, string> aliases = GetAliases(spec, newValue);
+ SetNoLogValues(spec, newValue);
+
+ List<string> subLegalInputs = optionsSpec.Keys.Cast<string>().ToList();
+ subLegalInputs.AddRange(aliases.Keys.Cast<string>().ToList());
+
+ CheckArguments(spec, newValue, subLegalInputs);
+ optionsContext.RemoveAt(optionsContext.Count - 1);
+ return newValue;
+ }
+
+ private string GetFormattedResults(Dictionary<string, object> result)
+ {
+ if (!result.ContainsKey("invocation"))
+ result["invocation"] = new Dictionary<string, object>() { { "module_args", RemoveNoLogValues(Params, noLogValues) } };
+
+ if (warnings.Count > 0)
+ result["warnings"] = warnings;
+
+ if (deprecations.Count > 0)
+ result["deprecations"] = deprecations;
+
+ if (Diff.Count > 0 && DiffMode)
+ result["diff"] = Diff;
+
+ return ToJson(result);
+ }
+
+ private string FormatLogData(object data, int indentLevel)
+ {
+ if (data == null)
+ return "$null";
+
+ string msg = "";
+ if (data is IList)
+ {
+ string newMsg = "";
+ foreach (object value in (IList)data)
+ {
+ string entryValue = FormatLogData(value, indentLevel + 2);
+ newMsg += String.Format("\r\n{0}- {1}", new String(' ', indentLevel), entryValue);
+ }
+ msg += newMsg;
+ }
+ else if (data is IDictionary)
+ {
+ bool start = true;
+ foreach (DictionaryEntry entry in (IDictionary)data)
+ {
+ string newMsg = FormatLogData(entry.Value, indentLevel + 2);
+ if (!start)
+ msg += String.Format("\r\n{0}", new String(' ', indentLevel));
+ msg += String.Format("{0}: {1}", (string)entry.Key, newMsg);
+ start = false;
+ }
+ }
+ else
+ msg = (string)RemoveNoLogValues(ParseStr(data), noLogValues);
+
+ return msg;
+ }
+
+ private object RemoveNoLogValues(object value, HashSet<string> noLogStrings)
+ {
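+ // Walk containers iteratively with a queue of (old, new) pairs instead of recursing so that
+ // deeply nested module results cannot overflow the stack.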
+ Queue<Tuple<object, object>> deferredRemovals = new Queue<Tuple<object, object>>();
+ object newValue = RemoveValueConditions(value, noLogStrings, deferredRemovals);
+
+ while (deferredRemovals.Count > 0)
+ {
+ Tuple<object, object> data = deferredRemovals.Dequeue();
+ object oldData = data.Item1;
+ object newData = data.Item2;
+
+ if (oldData is IDictionary)
+ {
+ foreach (DictionaryEntry entry in (IDictionary)oldData)
+ {
+ object newElement = RemoveValueConditions(entry.Value, noLogStrings, deferredRemovals);
+ ((IDictionary)newData).Add((string)entry.Key, newElement);
+ }
+ }
+ else
+ {
+ foreach (object element in (IList)oldData)
+ {
+ object newElement = RemoveValueConditions(element, noLogStrings, deferredRemovals);
+ ((IList)newData).Add(newElement);
+ }
+ }
+ }
+
+ return newValue;
+ }
+
+ private object RemoveValueConditions(object value, HashSet<string> noLogStrings, Queue<Tuple<object, object>> deferredRemovals)
+ {
+ if (value == null)
+ return value;
+
+ Type valueType = value.GetType();
+ HashSet<Type> numericTypes = new HashSet<Type>
+ {
+ typeof(byte), typeof(sbyte), typeof(short), typeof(ushort), typeof(int), typeof(uint),
+ typeof(long), typeof(ulong), typeof(decimal), typeof(double), typeof(float)
+ };
+
+ if (numericTypes.Contains(valueType) || valueType == typeof(bool))
+ {
+ string valueString = ParseStr(value);
+ if (noLogStrings.Contains(valueString))
+ return "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER";
+ foreach (string omitMe in noLogStrings)
+ if (valueString.Contains(omitMe))
+ return "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER";
+ }
+ else if (valueType == typeof(DateTime))
+ value = ((DateTime)value).ToString("o");
+ else if (value is IList)
+ {
+ List<object> newValue = new List<object>();
+ deferredRemovals.Enqueue(new Tuple<object, object>((IList)value, newValue));
+ value = newValue;
+ }
+ else if (value is IDictionary)
+ {
+ Hashtable newValue = new Hashtable();
+ deferredRemovals.Enqueue(new Tuple<object, object>((IDictionary)value, newValue));
+ value = newValue;
+ }
+ else
+ {
+ string stringValue = value.ToString();
+ if (noLogStrings.Contains(stringValue))
+ return "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER";
+ foreach (string omitMe in noLogStrings)
+ if (stringValue.Contains(omitMe))
+ return (stringValue).Replace(omitMe, "********");
+ value = stringValue;
+ }
+ return value;
+ }
+
+ private void CleanupFiles(object s, EventArgs ev)
+ {
+ foreach (string path in cleanupFiles)
+ {
+ if (File.Exists(path))
+ File.Delete(path);
+ else if (Directory.Exists(path))
+ Directory.Delete(path, true);
+ }
+ cleanupFiles = new List<string>();
+ }
+
+ private string FormatOptionsContext(string msg, string prefix = " ")
+ {
+ if (optionsContext.Count > 0)
+ msg += String.Format("{0}found in {1}", prefix, String.Join(" -> ", optionsContext));
+ return msg;
+ }
+
+ [DllImport("kernel32.dll")]
+ private static extern IntPtr GetConsoleWindow();
+
+ private static void ExitModule(int rc)
+ {
+ // When running in a Runspace, Environment.Exit will kill the entire
+ // process, which is not what we want. Detect if we are in a
+ // Runspace and call a ScriptBlock with exit instead.
+ if (Runspace.DefaultRunspace != null)
+ ScriptBlock.Create("Set-Variable -Name LASTEXITCODE -Value $args[0] -Scope Global; exit $args[0]").Invoke(rc);
+ else
+ {
+ // Used for local debugging in Visual Studio
+ if (System.Diagnostics.Debugger.IsAttached)
+ {
+ Console.WriteLine("Press enter to continue...");
+ Console.ReadLine();
+ }
+ Environment.Exit(rc);
+ }
+ }
+
+ private static void WriteLineModule(string line)
+ {
+ Console.WriteLine(line);
+ }
+ }
+}
diff --git a/lib/ansible/module_utils/csharp/Ansible.Become.cs b/lib/ansible/module_utils/csharp/Ansible.Become.cs
new file mode 100644
index 0000000..a6f645c
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/Ansible.Become.cs
@@ -0,0 +1,655 @@
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Security.AccessControl;
+using System.Security.Principal;
+using System.Text;
+using Ansible.AccessToken;
+using Ansible.Process;
+
+namespace Ansible.Become
+{
+ internal class NativeHelpers
+ {
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct KERB_S4U_LOGON
+ {
+ public UInt32 MessageType;
+ public UInt32 Flags;
+ public LSA_UNICODE_STRING ClientUpn;
+ public LSA_UNICODE_STRING ClientRealm;
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Ansi)]
+ public struct LSA_STRING
+ {
+ public UInt16 Length;
+ public UInt16 MaximumLength;
+ [MarshalAs(UnmanagedType.LPStr)] public string Buffer;
+
+ public static implicit operator string(LSA_STRING s)
+ {
+ return s.Buffer;
+ }
+
+ public static implicit operator LSA_STRING(string s)
+ {
+ if (s == null)
+ s = "";
+
+ LSA_STRING lsaStr = new LSA_STRING
+ {
+ Buffer = s,
+ Length = (UInt16)s.Length,
+ MaximumLength = (UInt16)(s.Length + 1),
+ };
+ return lsaStr;
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct LSA_UNICODE_STRING
+ {
+ public UInt16 Length;
+ public UInt16 MaximumLength;
+ public IntPtr Buffer;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SECURITY_LOGON_SESSION_DATA
+ {
+ public UInt32 Size;
+ public Luid LogonId;
+ public LSA_UNICODE_STRING UserName;
+ public LSA_UNICODE_STRING LogonDomain;
+ public LSA_UNICODE_STRING AuthenticationPackage;
+ public SECURITY_LOGON_TYPE LogonType;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_SOURCE
+ {
+ [MarshalAs(UnmanagedType.ByValArray, SizeConst = 8)] public char[] SourceName;
+ public Luid SourceIdentifier;
+ }
+
+ public enum SECURITY_LOGON_TYPE
+ {
+ System = 0, // Used only by the System account
+ Interactive = 2,
+ Network,
+ Batch,
+ Service,
+ Proxy,
+ Unlock,
+ NetworkCleartext,
+ NewCredentials,
+ RemoteInteractive,
+ CachedInteractive,
+ CachedRemoteInteractive,
+ CachedUnlock
+ }
+ }
+
+ internal class NativeMethods
+ {
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool AllocateLocallyUniqueId(
+ out Luid Luid);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool CreateProcessWithTokenW(
+ SafeNativeHandle hToken,
+ LogonFlags dwLogonFlags,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpApplicationName,
+ StringBuilder lpCommandLine,
+ Process.NativeHelpers.ProcessCreationFlags dwCreationFlags,
+ Process.SafeMemoryBuffer lpEnvironment,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpCurrentDirectory,
+ Process.NativeHelpers.STARTUPINFOEX lpStartupInfo,
+ out Process.NativeHelpers.PROCESS_INFORMATION lpProcessInformation);
+
+ [DllImport("kernel32.dll")]
+ public static extern UInt32 GetCurrentThreadId();
+
+ [DllImport("user32.dll", SetLastError = true)]
+ public static extern NoopSafeHandle GetProcessWindowStation();
+
+ [DllImport("user32.dll", SetLastError = true)]
+ public static extern NoopSafeHandle GetThreadDesktop(
+ UInt32 dwThreadId);
+
+ [DllImport("secur32.dll", SetLastError = true)]
+ public static extern UInt32 LsaDeregisterLogonProcess(
+ IntPtr LsaHandle);
+
+ [DllImport("secur32.dll", SetLastError = true)]
+ public static extern UInt32 LsaFreeReturnBuffer(
+ IntPtr Buffer);
+
+ [DllImport("secur32.dll", SetLastError = true)]
+ public static extern UInt32 LsaGetLogonSessionData(
+ ref Luid LogonId,
+ out SafeLsaMemoryBuffer ppLogonSessionData);
+
+ [DllImport("secur32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern UInt32 LsaLogonUser(
+ SafeLsaHandle LsaHandle,
+ NativeHelpers.LSA_STRING OriginName,
+ LogonType LogonType,
+ UInt32 AuthenticationPackage,
+ IntPtr AuthenticationInformation,
+ UInt32 AuthenticationInformationLength,
+ IntPtr LocalGroups,
+ NativeHelpers.TOKEN_SOURCE SourceContext,
+ out SafeLsaMemoryBuffer ProfileBuffer,
+ out UInt32 ProfileBufferLength,
+ out Luid LogonId,
+ out SafeNativeHandle Token,
+ out IntPtr Quotas,
+ out UInt32 SubStatus);
+
+ [DllImport("secur32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern UInt32 LsaLookupAuthenticationPackage(
+ SafeLsaHandle LsaHandle,
+ NativeHelpers.LSA_STRING PackageName,
+ out UInt32 AuthenticationPackage);
+
+ [DllImport("advapi32.dll")]
+ public static extern UInt32 LsaNtStatusToWinError(
+ UInt32 Status);
+
+ [DllImport("secur32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern UInt32 LsaRegisterLogonProcess(
+ NativeHelpers.LSA_STRING LogonProcessName,
+ out SafeLsaHandle LsaHandle,
+ out IntPtr SecurityMode);
+ }
+
+ internal class SafeLsaHandle : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeLsaHandle() : base(true) { }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ UInt32 res = NativeMethods.LsaDeregisterLogonProcess(handle);
+ return res == 0;
+ }
+ }
+
+ internal class SafeLsaMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeLsaMemoryBuffer() : base(true) { }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ UInt32 res = NativeMethods.LsaFreeReturnBuffer(handle);
+ return res == 0;
+ }
+ }
+
+ internal class NoopSafeHandle : SafeHandle
+ {
+ public NoopSafeHandle() : base(IntPtr.Zero, false) { }
+ public override bool IsInvalid { get { return false; } }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle() { return true; }
+ }
+
+ [Flags]
+ public enum LogonFlags
+ {
+ WithProfile = 0x00000001,
+ NetcredentialsOnly = 0x00000002
+ }
+
+ public class BecomeUtil
+ {
+ private static List<string> SERVICE_SIDS = new List<string>()
+ {
+ "S-1-5-18", // NT AUTHORITY\SYSTEM
+ "S-1-5-19", // NT AUTHORITY\LocalService
+ "S-1-5-20" // NT AUTHORITY\NetworkService
+ };
+ private static int WINDOWS_STATION_ALL_ACCESS = 0x000F037F;
+ private static int DESKTOP_RIGHTS_ALL_ACCESS = 0x000F01FF;
+
+ public static Result CreateProcessAsUser(string username, string password, string command)
+ {
+ return CreateProcessAsUser(username, password, LogonFlags.WithProfile, LogonType.Interactive,
+ null, command, null, null, "");
+ }
+
+ public static Result CreateProcessAsUser(string username, string password, LogonFlags logonFlags, LogonType logonType,
+ string lpApplicationName, string lpCommandLine, string lpCurrentDirectory, IDictionary environment,
+ string stdin)
+ {
+ byte[] stdinBytes;
+ if (String.IsNullOrEmpty(stdin))
+ stdinBytes = new byte[0];
+ else
+ {
+ if (!stdin.EndsWith(Environment.NewLine))
+ stdin += Environment.NewLine;
+ stdinBytes = new UTF8Encoding(false).GetBytes(stdin);
+ }
+ return CreateProcessAsUser(username, password, logonFlags, logonType, lpApplicationName, lpCommandLine,
+ lpCurrentDirectory, environment, stdinBytes);
+ }
+
+ /// <summary>
+ /// Creates a process as another user account. This method will attempt to run as another user with the
+ /// highest possible permissions available. The main privilege required is the SeDebugPrivilege; without
+ /// this privilege you can only run as a local or domain user if the username and password are specified.
+ /// </summary>
+ /// <param name="username">The username of the runas user</param>
+ /// <param name="password">The password of the runas user</param>
+ /// <param name="logonFlags">LogonFlags to control how to logon a user when the password is specified</param>
+ /// <param name="logonType">Controls what type of logon is used, this only applies when the password is specified</param>
+ /// <param name="lpApplicationName">The name of the executable or batch file to executable</param>
+ /// <param name="lpCommandLine">The command line to execute, typically this includes lpApplication as the first argument</param>
+ /// <param name="lpCurrentDirectory">The full path to the current directory for the process, null will have the same cwd as the calling process</param>
+ /// <param name="environment">A dictionary of key/value pairs to define the new process environment</param>
+ /// <param name="stdin">Bytes sent to the stdin pipe</param>
+ /// <returns>Ansible.Process.Result object that contains the command output and return code</returns>
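+ /// <example>
+ /// A minimal usage sketch; the username and command are illustrative only, and becoming SYSTEM
+ /// requires the privileges described above:
+ /// <code>
+ /// Result res = BecomeUtil.CreateProcessAsUser("SYSTEM", null, "whoami.exe /all");
+ /// </code>
+ /// </example>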
+ public static Result CreateProcessAsUser(string username, string password, LogonFlags logonFlags, LogonType logonType,
+ string lpApplicationName, string lpCommandLine, string lpCurrentDirectory, IDictionary environment, byte[] stdin)
+ {
+ // While we use STARTUPINFOEX, setting EXTENDED_STARTUPINFO_PRESENT causes a parameter validation error
+ Process.NativeHelpers.ProcessCreationFlags creationFlags = Process.NativeHelpers.ProcessCreationFlags.CREATE_UNICODE_ENVIRONMENT;
+ Process.NativeHelpers.PROCESS_INFORMATION pi = new Process.NativeHelpers.PROCESS_INFORMATION();
+ Process.NativeHelpers.STARTUPINFOEX si = new Process.NativeHelpers.STARTUPINFOEX();
+ si.startupInfo.dwFlags = Process.NativeHelpers.StartupInfoFlags.USESTDHANDLES;
+
+ SafeFileHandle stdoutRead, stdoutWrite, stderrRead, stderrWrite, stdinRead, stdinWrite;
+ ProcessUtil.CreateStdioPipes(si, out stdoutRead, out stdoutWrite, out stderrRead, out stderrWrite,
+ out stdinRead, out stdinWrite);
+ FileStream stdinStream = new FileStream(stdinWrite, FileAccess.Write);
+
+ // $null from PowerShell ends up as an empty string; we need to convert back to null as an empty string doesn't
+ // make sense for these parameters
+ if (lpApplicationName == "")
+ lpApplicationName = null;
+
+ if (lpCurrentDirectory == "")
+ lpCurrentDirectory = null;
+
+ // A user may have 2 tokens, 1 limited and 1 elevated. GetUserTokens will return both tokens to ensure
+ // we don't close one of the pairs while the process is still running. If the process tries to retrieve
+ // one of the pairs and the token handle is closed then it will fail with ERROR_NO_SUCH_LOGON_SESSION.
+ List<SafeNativeHandle> userTokens = GetUserTokens(username, password, logonType);
+ try
+ {
+ using (Process.SafeMemoryBuffer lpEnvironment = ProcessUtil.CreateEnvironmentPointer(environment))
+ {
+ bool launchSuccess = false;
+ StringBuilder commandLine = new StringBuilder(lpCommandLine);
+ foreach (SafeNativeHandle token in userTokens)
+ {
+ // GetUserTokens could return null if an elevated token could not be retrieved.
+ if (token == null)
+ continue;
+
+ if (NativeMethods.CreateProcessWithTokenW(token, logonFlags, lpApplicationName,
+ commandLine, creationFlags, lpEnvironment, lpCurrentDirectory, si, out pi))
+ {
+ launchSuccess = true;
+ break;
+ }
+ }
+
+ if (!launchSuccess)
+ throw new Process.Win32Exception("CreateProcessWithTokenW() failed");
+ }
+ return ProcessUtil.WaitProcess(stdoutRead, stdoutWrite, stderrRead, stderrWrite, stdinStream, stdin,
+ pi.hProcess);
+ }
+ finally
+ {
+ userTokens.Where(t => t != null).ToList().ForEach(t => t.Dispose());
+ }
+ }
+
+ private static List<SafeNativeHandle> GetUserTokens(string username, string password, LogonType logonType)
+ {
+ List<SafeNativeHandle> userTokens = new List<SafeNativeHandle>();
+
+ SafeNativeHandle systemToken = null;
+ bool impersonated = false;
+ string becomeSid = username;
+ if (logonType != LogonType.NewCredentials)
+ {
+ // If prefixed with .\, we are becoming a local account, strip the prefix
+ if (username.StartsWith(".\\"))
+ username = username.Substring(2);
+
+ NTAccount account = new NTAccount(username);
+ becomeSid = ((SecurityIdentifier)account.Translate(typeof(SecurityIdentifier))).Value;
+
+ // Grant access to the current Windows Station and Desktop to the become user
+ GrantAccessToWindowStationAndDesktop(account);
+
+ // Try to impersonate a SYSTEM token; we need a SYSTEM token to either become a well known service
+ // account or have administrative rights on the become access token.
+ // If we ultimately are becoming the SYSTEM account we want the token with the most privileges available.
+ // https://github.com/ansible/ansible/issues/71453
+ bool mostPrivileges = becomeSid == "S-1-5-18";
+ systemToken = GetPrimaryTokenForUser(new SecurityIdentifier("S-1-5-18"),
+ new List<string>() { "SeTcbPrivilege" }, mostPrivileges);
+ if (systemToken != null)
+ {
+ try
+ {
+ TokenUtil.ImpersonateToken(systemToken);
+ impersonated = true;
+ }
+ catch (Process.Win32Exception) { } // We tried, just rely on current user's permissions.
+ }
+ }
+
+ // We require impersonation if becoming a service sid or becoming a user without a password
+ if (!impersonated && (SERVICE_SIDS.Contains(becomeSid) || String.IsNullOrEmpty(password)))
+ throw new Exception("Failed to get token for NT AUTHORITY\\SYSTEM required for become as a service account or an account without a password");
+
+ try
+ {
+ if (becomeSid == "S-1-5-18")
+ userTokens.Add(systemToken);
+ // Cannot use String.IsNullOrEmpty() as an empty string means an account that doesn't have a password.
+ // We only use S4U if no password was defined or it was null
+ else if (!SERVICE_SIDS.Contains(becomeSid) && password == null && logonType != LogonType.NewCredentials)
+ {
+ // If no password was specified, try and duplicate an existing token for that user or use S4U to
+ // generate one without network credentials
+ SecurityIdentifier sid = new SecurityIdentifier(becomeSid);
+ SafeNativeHandle becomeToken = GetPrimaryTokenForUser(sid);
+ if (becomeToken != null)
+ {
+ userTokens.Add(GetElevatedToken(becomeToken));
+ userTokens.Add(becomeToken);
+ }
+ else
+ {
+ becomeToken = GetS4UTokenForUser(sid, logonType);
+ userTokens.Add(null);
+ userTokens.Add(becomeToken);
+ }
+ }
+ else
+ {
+ string domain = null;
+ switch (becomeSid)
+ {
+ case "S-1-5-19":
+ logonType = LogonType.Service;
+ domain = "NT AUTHORITY";
+ username = "LocalService";
+ break;
+ case "S-1-5-20":
+ logonType = LogonType.Service;
+ domain = "NT AUTHORITY";
+ username = "NetworkService";
+ break;
+ default:
+ // Trying to become a local or domain account
+ if (username.Contains(@"\"))
+ {
+ string[] userSplit = username.Split(new char[1] { '\\' }, 2);
+ domain = userSplit[0];
+ username = userSplit[1];
+ }
+ else if (!username.Contains("@"))
+ domain = ".";
+ break;
+ }
+
+ SafeNativeHandle hToken = TokenUtil.LogonUser(username, domain, password, logonType,
+ LogonProvider.Default);
+
+ // Get the elevated token for a local/domain accounts only
+ if (!SERVICE_SIDS.Contains(becomeSid))
+ userTokens.Add(GetElevatedToken(hToken));
+ userTokens.Add(hToken);
+ }
+ }
+ finally
+ {
+ if (impersonated)
+ TokenUtil.RevertToSelf();
+ }
+
+ return userTokens;
+ }
+
+ private static SafeNativeHandle GetPrimaryTokenForUser(SecurityIdentifier sid,
+ List<string> requiredPrivileges = null, bool mostPrivileges = false)
+ {
+ // According to CreateProcessWithTokenW we require a token with
+ // TOKEN_QUERY, TOKEN_DUPLICATE and TOKEN_ASSIGN_PRIMARY
+ // Also add in TOKEN_IMPERSONATE so we can get an impersonated token
+ TokenAccessLevels dwAccess = TokenAccessLevels.Query |
+ TokenAccessLevels.Duplicate |
+ TokenAccessLevels.AssignPrimary |
+ TokenAccessLevels.Impersonate;
+
+ SafeNativeHandle userToken = null;
+ int privilegeCount = 0;
+
+ foreach (SafeNativeHandle hToken in TokenUtil.EnumerateUserTokens(sid, dwAccess))
+ {
+ // Filter out any Network logon tokens; using become with those is useless when S4U
+ // can give us a Batch logon
+ NativeHelpers.SECURITY_LOGON_TYPE tokenLogonType = GetTokenLogonType(hToken);
+ if (tokenLogonType == NativeHelpers.SECURITY_LOGON_TYPE.Network)
+ continue;
+
+ List<string> actualPrivileges = TokenUtil.GetTokenPrivileges(hToken).Select(x => x.Name).ToList();
+
+ // If the token has fewer privileges than, or the same number as, the best token found so far, skip it.
+ if (mostPrivileges && privilegeCount >= actualPrivileges.Count)
+ continue;
+
+ // Check that the required privileges are on the token
+ if (requiredPrivileges != null)
+ {
+ int missing = requiredPrivileges.Where(x => !actualPrivileges.Contains(x)).Count();
+ if (missing > 0)
+ continue;
+ }
+
+ // Duplicate the token to convert it to a primary token with the access level required.
+ try
+ {
+ userToken = TokenUtil.DuplicateToken(hToken, TokenAccessLevels.MaximumAllowed,
+ SecurityImpersonationLevel.Anonymous, TokenType.Primary);
+ privilegeCount = actualPrivileges.Count;
+ }
+ catch (Process.Win32Exception)
+ {
+ continue;
+ }
+
+ // If we don't care about getting the token with the most privileges, escape the loop as we already
+ // have a token.
+ if (!mostPrivileges)
+ break;
+ }
+
+ return userToken;
+ }
+
+ private static SafeNativeHandle GetS4UTokenForUser(SecurityIdentifier sid, LogonType logonType)
+ {
+ NTAccount becomeAccount = (NTAccount)sid.Translate(typeof(NTAccount));
+ string[] userSplit = becomeAccount.Value.Split(new char[1] { '\\' }, 2);
+ string domainName = userSplit[0];
+ string username = userSplit[1];
+ bool domainUser = domainName.ToLowerInvariant() != Environment.MachineName.ToLowerInvariant();
+
+ NativeHelpers.LSA_STRING logonProcessName = "ansible";
+ SafeLsaHandle lsaHandle;
+ IntPtr securityMode;
+ UInt32 res = NativeMethods.LsaRegisterLogonProcess(logonProcessName, out lsaHandle, out securityMode);
+ if (res != 0)
+ throw new Process.Win32Exception((int)NativeMethods.LsaNtStatusToWinError(res), "LsaRegisterLogonProcess() failed");
+
+ using (lsaHandle)
+ {
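+ // Domain accounts are logged on through Kerberos, local accounts through the MSV1_0 (NTLM) package.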
+ NativeHelpers.LSA_STRING packageName = domainUser ? "Kerberos" : "MICROSOFT_AUTHENTICATION_PACKAGE_V1_0";
+ UInt32 authPackage;
+ res = NativeMethods.LsaLookupAuthenticationPackage(lsaHandle, packageName, out authPackage);
+ if (res != 0)
+ throw new Process.Win32Exception((int)NativeMethods.LsaNtStatusToWinError(res),
+ String.Format("LsaLookupAuthenticationPackage({0}) failed", (string)packageName));
+
+ int usernameLength = username.Length * sizeof(char);
+ int domainLength = domainName.Length * sizeof(char);
+ int authInfoLength = (Marshal.SizeOf(typeof(NativeHelpers.KERB_S4U_LOGON)) + usernameLength + domainLength);
+ IntPtr authInfo = Marshal.AllocHGlobal((int)authInfoLength);
+ try
+ {
+ IntPtr usernamePtr = IntPtr.Add(authInfo, Marshal.SizeOf(typeof(NativeHelpers.KERB_S4U_LOGON)));
+ IntPtr domainPtr = IntPtr.Add(usernamePtr, usernameLength);
+
+ // KERB_S4U_LOGON has the same structure as MSV1_0_S4U_LOGON (local accounts)
+ NativeHelpers.KERB_S4U_LOGON s4uLogon = new NativeHelpers.KERB_S4U_LOGON
+ {
+ MessageType = 12, // KerbS4ULogon
+ Flags = 0,
+ ClientUpn = new NativeHelpers.LSA_UNICODE_STRING
+ {
+ Length = (UInt16)usernameLength,
+ MaximumLength = (UInt16)usernameLength,
+ Buffer = usernamePtr,
+ },
+ ClientRealm = new NativeHelpers.LSA_UNICODE_STRING
+ {
+ Length = (UInt16)domainLength,
+ MaximumLength = (UInt16)domainLength,
+ Buffer = domainPtr,
+ },
+ };
+ Marshal.StructureToPtr(s4uLogon, authInfo, false);
+ Marshal.Copy(username.ToCharArray(), 0, usernamePtr, username.Length);
+ Marshal.Copy(domainName.ToCharArray(), 0, domainPtr, domainName.Length);
+
+ Luid sourceLuid;
+ if (!NativeMethods.AllocateLocallyUniqueId(out sourceLuid))
+ throw new Process.Win32Exception("AllocateLocallyUniqueId() failed");
+
+ NativeHelpers.TOKEN_SOURCE tokenSource = new NativeHelpers.TOKEN_SOURCE
+ {
+ SourceName = "ansible\0".ToCharArray(),
+ SourceIdentifier = sourceLuid,
+ };
+
+ // Only Batch or Network will work with S4U; prefer Batch but use Network if asked
+ LogonType lsaLogonType = logonType == LogonType.Network
+ ? LogonType.Network
+ : LogonType.Batch;
+ SafeLsaMemoryBuffer profileBuffer;
+ UInt32 profileBufferLength;
+ Luid logonId;
+ SafeNativeHandle hToken;
+ IntPtr quotas;
+ UInt32 subStatus;
+
+ res = NativeMethods.LsaLogonUser(lsaHandle, logonProcessName, lsaLogonType, authPackage,
+ authInfo, (UInt32)authInfoLength, IntPtr.Zero, tokenSource, out profileBuffer, out profileBufferLength,
+ out logonId, out hToken, out quotas, out subStatus);
+ if (res != 0)
+ throw new Process.Win32Exception((int)NativeMethods.LsaNtStatusToWinError(res),
+ String.Format("LsaLogonUser() failed with substatus {0}", subStatus));
+
+ profileBuffer.Dispose();
+ return hToken;
+ }
+ finally
+ {
+ Marshal.FreeHGlobal(authInfo);
+ }
+ }
+ }
+
+ private static SafeNativeHandle GetElevatedToken(SafeNativeHandle hToken)
+ {
+ TokenElevationType tet = TokenUtil.GetTokenElevationType(hToken);
+ // We already have the best token we can get, no linked token is really available.
+ if (tet != TokenElevationType.Limited)
+ return null;
+
+ SafeNativeHandle linkedToken = TokenUtil.GetTokenLinkedToken(hToken);
+ TokenStatistics tokenStats = TokenUtil.GetTokenStatistics(linkedToken);
+
+ // We can only use a token if it's a primary one (we had the SeTcbPrivilege set)
+ if (tokenStats.TokenType == TokenType.Primary)
+ return linkedToken;
+ else
+ return null;
+ }
+
+ private static NativeHelpers.SECURITY_LOGON_TYPE GetTokenLogonType(SafeNativeHandle hToken)
+ {
+ TokenStatistics stats = TokenUtil.GetTokenStatistics(hToken);
+
+ SafeLsaMemoryBuffer sessionDataPtr;
+ UInt32 res = NativeMethods.LsaGetLogonSessionData(ref stats.AuthenticationId, out sessionDataPtr);
+ if (res != 0)
+ // Default to Network; if we weren't able to get the actual type, treat it as an error and assume
+ // we don't want to run a process with the token
+ return NativeHelpers.SECURITY_LOGON_TYPE.Network;
+
+ using (sessionDataPtr)
+ {
+ NativeHelpers.SECURITY_LOGON_SESSION_DATA sessionData = (NativeHelpers.SECURITY_LOGON_SESSION_DATA)Marshal.PtrToStructure(
+ sessionDataPtr.DangerousGetHandle(), typeof(NativeHelpers.SECURITY_LOGON_SESSION_DATA));
+ return sessionData.LogonType;
+ }
+ }
+
+ private static void GrantAccessToWindowStationAndDesktop(IdentityReference account)
+ {
+ GrantAccess(account, NativeMethods.GetProcessWindowStation(), WINDOWS_STATION_ALL_ACCESS);
+ GrantAccess(account, NativeMethods.GetThreadDesktop(NativeMethods.GetCurrentThreadId()), DESKTOP_RIGHTS_ALL_ACCESS);
+ }
+
+ private static void GrantAccess(IdentityReference account, NoopSafeHandle handle, int accessMask)
+ {
+ GenericSecurity security = new GenericSecurity(false, ResourceType.WindowObject, handle, AccessControlSections.Access);
+ security.AddAccessRule(new GenericAccessRule(account, accessMask, AccessControlType.Allow));
+ security.Persist(handle, AccessControlSections.Access);
+ }
+
+ private class GenericSecurity : NativeObjectSecurity
+ {
+ public GenericSecurity(bool isContainer, ResourceType resType, SafeHandle objectHandle, AccessControlSections sectionsRequested)
+ : base(isContainer, resType, objectHandle, sectionsRequested) { }
+ public new void Persist(SafeHandle handle, AccessControlSections includeSections) { base.Persist(handle, includeSections); }
+ public new void AddAccessRule(AccessRule rule) { base.AddAccessRule(rule); }
+ public override Type AccessRightType { get { throw new NotImplementedException(); } }
+ public override AccessRule AccessRuleFactory(System.Security.Principal.IdentityReference identityReference, int accessMask, bool isInherited,
+ InheritanceFlags inheritanceFlags, PropagationFlags propagationFlags, AccessControlType type)
+ { throw new NotImplementedException(); }
+ public override Type AccessRuleType { get { return typeof(AccessRule); } }
+ public override AuditRule AuditRuleFactory(System.Security.Principal.IdentityReference identityReference, int accessMask, bool isInherited,
+ InheritanceFlags inheritanceFlags, PropagationFlags propagationFlags, AuditFlags flags)
+ { throw new NotImplementedException(); }
+ public override Type AuditRuleType { get { return typeof(AuditRule); } }
+ }
+
+ private class GenericAccessRule : AccessRule
+ {
+ public GenericAccessRule(IdentityReference identity, int accessMask, AccessControlType type) :
+ base(identity, accessMask, false, InheritanceFlags.None, PropagationFlags.None, type)
+ { }
+ }
+ }
+}
diff --git a/lib/ansible/module_utils/csharp/Ansible.Privilege.cs b/lib/ansible/module_utils/csharp/Ansible.Privilege.cs
new file mode 100644
index 0000000..2c0b266
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/Ansible.Privilege.cs
@@ -0,0 +1,443 @@
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Security.Principal;
+using System.Text;
+
+namespace Ansible.Privilege
+{
+ internal class NativeHelpers
+ {
+ [StructLayout(LayoutKind.Sequential)]
+ public struct LUID
+ {
+ public UInt32 LowPart;
+ public Int32 HighPart;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct LUID_AND_ATTRIBUTES
+ {
+ public LUID Luid;
+ public PrivilegeAttributes Attributes;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_PRIVILEGES
+ {
+ public UInt32 PrivilegeCount;
+ [MarshalAs(UnmanagedType.ByValArray, SizeConst = 1)]
+ public LUID_AND_ATTRIBUTES[] Privileges;
+ }
+ }
+
+ internal class NativeMethods
+ {
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool AdjustTokenPrivileges(
+ SafeNativeHandle TokenHandle,
+ [MarshalAs(UnmanagedType.Bool)] bool DisableAllPrivileges,
+ SafeMemoryBuffer NewState,
+ UInt32 BufferLength,
+ SafeMemoryBuffer PreviousState,
+ out UInt32 ReturnLength);
+
+ [DllImport("kernel32.dll")]
+ public static extern bool CloseHandle(
+ IntPtr hObject);
+
+ [DllImport("kernel32")]
+ public static extern SafeWaitHandle GetCurrentProcess();
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool GetTokenInformation(
+ SafeNativeHandle TokenHandle,
+ UInt32 TokenInformationClass,
+ SafeMemoryBuffer TokenInformation,
+ UInt32 TokenInformationLength,
+ out UInt32 ReturnLength);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool LookupPrivilegeName(
+ string lpSystemName,
+ ref NativeHelpers.LUID lpLuid,
+ StringBuilder lpName,
+ ref UInt32 cchName);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool LookupPrivilegeValue(
+ string lpSystemName,
+ string lpName,
+ out NativeHelpers.LUID lpLuid);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool OpenProcessToken(
+ SafeHandle ProcessHandle,
+ TokenAccessLevels DesiredAccess,
+ out SafeNativeHandle TokenHandle);
+ }
+
+ internal class SafeMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeMemoryBuffer() : base(true) { }
+ public SafeMemoryBuffer(int cb) : base(true)
+ {
+ base.SetHandle(Marshal.AllocHGlobal(cb));
+ }
+ public SafeMemoryBuffer(IntPtr handle) : base(true)
+ {
+ base.SetHandle(handle);
+ }
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ Marshal.FreeHGlobal(handle);
+ return true;
+ }
+ }
+
+ internal class SafeNativeHandle : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeNativeHandle() : base(true) { }
+ public SafeNativeHandle(IntPtr handle) : base(true) { this.handle = handle; }
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ return NativeMethods.CloseHandle(handle);
+ }
+ }
+
+ public class Win32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+ public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+ public Win32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode);
+ }
+ public override string Message { get { return _msg; } }
+ public static explicit operator Win32Exception(string message) { return new Win32Exception(message); }
+ }
+
+ [Flags]
+ public enum PrivilegeAttributes : uint
+ {
+ Disabled = 0x00000000,
+ EnabledByDefault = 0x00000001,
+ Enabled = 0x00000002,
+ Removed = 0x00000004,
+ UsedForAccess = 0x80000000,
+ }
+
+ public class PrivilegeEnabler : IDisposable
+ {
+ private SafeHandle process;
+ private Dictionary<string, bool?> previousState;
+
+ /// <summary>
+ /// Temporarily enables the privileges specified and reverts once the class is disposed.
+ /// </summary>
+ /// <param name="strict">Whether to fail if any privilege failed to be enabled, if false then this will continue silently</param>
+ /// <param name="privileges">A list of privileges to enable</param>
+ public PrivilegeEnabler(bool strict, params string[] privileges)
+ {
+ if (privileges.Length > 0)
+ {
+ process = PrivilegeUtil.GetCurrentProcess();
+ Dictionary<string, bool?> newState = new Dictionary<string, bool?>();
+ for (int i = 0; i < privileges.Length; i++)
+ newState.Add(privileges[i], true);
+ try
+ {
+ previousState = PrivilegeUtil.SetTokenPrivileges(process, newState, strict);
+ }
+ catch (Win32Exception e)
+ {
+ throw new Win32Exception(e.NativeErrorCode, String.Format("Failed to enable privilege(s) {0}", String.Join(", ", privileges)));
+ }
+ }
+ }
+
+ public void Dispose()
+ {
+ // disables any privileges that were enabled by this class
+ if (previousState != null)
+ PrivilegeUtil.SetTokenPrivileges(process, previousState);
+ GC.SuppressFinalize(this);
+ }
+ ~PrivilegeEnabler() { this.Dispose(); }
+ }
+
+ public class PrivilegeUtil
+ {
+ private static readonly UInt32 TOKEN_PRIVILEGES = 3;
+
+ /// <summary>
+ /// Checks if the specific privilege constant is a valid privilege name
+ /// </summary>
+ /// <param name="name">The privilege constant (Se*Privilege) is valid</param>
+ /// <returns>true if valid, else false</returns>
+ public static bool CheckPrivilegeName(string name)
+ {
+ NativeHelpers.LUID luid;
+ if (!NativeMethods.LookupPrivilegeValue(null, name, out luid))
+ {
+ int errCode = Marshal.GetLastWin32Error();
+ if (errCode != 1313) // ERROR_NO_SUCH_PRIVILEGE
+ throw new Win32Exception(errCode, String.Format("LookupPrivilegeValue({0}) failed", name));
+ return false;
+ }
+ else
+ {
+ return true;
+ }
+ }
+
+ /// <summary>
+ /// Disables the privilege specified
+ /// </summary>
+ /// <param name="token">The process token to that contains the privilege to disable</param>
+ /// <param name="privilege">The privilege constant to disable</param>
+ /// <returns>The previous state that can be passed to SetTokenPrivileges to revert the action</returns>
+ public static Dictionary<string, bool?> DisablePrivilege(SafeHandle token, string privilege)
+ {
+ return SetTokenPrivileges(token, new Dictionary<string, bool?>() { { privilege, false } });
+ }
+
+ /// <summary>
+ /// Disables all the privileges
+ /// </summary>
+ /// <param name="token">The process token to that contains the privilege to disable</param>
+ /// <returns>The previous state that can be passed to SetTokenPrivileges to revert the action</returns>
+ public static Dictionary<string, bool?> DisableAllPrivileges(SafeHandle token)
+ {
+ return AdjustTokenPrivileges(token, null, false);
+ }
+
+ /// <summary>
+ /// Enables the privilege specified
+ /// </summary>
+ /// <param name="token">The process token to that contains the privilege to enable</param>
+ /// <param name="privilege">The privilege constant to enable</param>
+ /// <returns>The previous state that can be passed to SetTokenPrivileges to revert the action</returns>
+ public static Dictionary<string, bool?> EnablePrivilege(SafeHandle token, string privilege)
+ {
+ return SetTokenPrivileges(token, new Dictionary<string, bool?>() { { privilege, true } });
+ }
+
+ /// <summary>
+ /// Gets the status of all the privileges on the specified token
+ /// </summary>
+ /// <param name="token">The process token to get the privilege status on</param>
+ /// <returns>Dictionary where the key is the privilege constant and the value is the PrivilegeAttributes flags</returns>
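+ /// <example>
+ /// A minimal sketch that lists the currently enabled privileges of this process:
+ /// <code>
+ /// var info = PrivilegeUtil.GetAllPrivilegeInfo(PrivilegeUtil.GetCurrentProcess());
+ /// foreach (var kv in info.Where(p => p.Value.HasFlag(PrivilegeAttributes.Enabled)))
+ ///     Console.WriteLine(kv.Key);
+ /// </code>
+ /// </example>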
+ public static Dictionary<String, PrivilegeAttributes> GetAllPrivilegeInfo(SafeHandle token)
+ {
+ SafeNativeHandle hToken = null;
+ if (!NativeMethods.OpenProcessToken(token, TokenAccessLevels.Query, out hToken))
+ throw new Win32Exception("OpenProcessToken() failed");
+
+ using (hToken)
+ {
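+ // First call with an empty buffer to learn how many bytes the TOKEN_PRIVILEGES blob requires.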
+ UInt32 tokenLength = 0;
+ NativeMethods.GetTokenInformation(hToken, TOKEN_PRIVILEGES, new SafeMemoryBuffer(0), 0, out tokenLength);
+
+ NativeHelpers.LUID_AND_ATTRIBUTES[] privileges;
+ using (SafeMemoryBuffer privilegesPtr = new SafeMemoryBuffer((int)tokenLength))
+ {
+ if (!NativeMethods.GetTokenInformation(hToken, TOKEN_PRIVILEGES, privilegesPtr, tokenLength, out tokenLength))
+ throw new Win32Exception("GetTokenInformation() for TOKEN_PRIVILEGES failed");
+
+ NativeHelpers.TOKEN_PRIVILEGES privilegeInfo = (NativeHelpers.TOKEN_PRIVILEGES)Marshal.PtrToStructure(
+ privilegesPtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_PRIVILEGES));
+ privileges = new NativeHelpers.LUID_AND_ATTRIBUTES[privilegeInfo.PrivilegeCount];
+ PtrToStructureArray(privileges, IntPtr.Add(privilegesPtr.DangerousGetHandle(), Marshal.SizeOf(privilegeInfo.PrivilegeCount)));
+ }
+
+ return privileges.ToDictionary(p => GetPrivilegeName(p.Luid), p => p.Attributes);
+ }
+ }
+
+ /// <summary>
+ /// Get a handle to the current process for use with the methods above
+ /// </summary>
+ /// <returns>SafeWaitHandle handle of the current process token</returns>
+ public static SafeWaitHandle GetCurrentProcess()
+ {
+ return NativeMethods.GetCurrentProcess();
+ }
+
+ /// <summary>
+ /// Removes a privilege from the token. This operation is irreversible
+ /// </summary>
+ /// <param name="token">The process token to that contains the privilege to remove</param>
+ /// <param name="privilege">The privilege constant to remove</param>
+ public static void RemovePrivilege(SafeHandle token, string privilege)
+ {
+ SetTokenPrivileges(token, new Dictionary<string, bool?>() { { privilege, null } });
+ }
+
+ /// <summary>
+ /// Do a bulk set of multiple privileges
+ /// </summary>
+ /// <param name="token">The process token to use when setting the privilege state</param>
+ /// <param name="state">A dictionary that contains the privileges to set, the key is the constant name and the value can be;
+ /// true - enable the privilege
+ /// false - disable the privilege
+ /// null - remove the privilege (this cannot be reversed)
+ /// </param>
+ /// <param name="strict">When true, will fail if one privilege failed to be set, otherwise it will silently continue</param>
+ /// <returns>The previous state that can be passed to SetTokenPrivileges to revert the action</returns>
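+ /// <example>
+ /// A minimal sketch that enables a privilege and then reverts to the previous state:
+ /// <code>
+ /// SafeHandle proc = PrivilegeUtil.GetCurrentProcess();
+ /// var previous = PrivilegeUtil.SetTokenPrivileges(proc, new Hashtable() { { "SeDebugPrivilege", true } });
+ /// PrivilegeUtil.SetTokenPrivileges(proc, previous);
+ /// </code>
+ /// </example>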
+ public static Dictionary<string, bool?> SetTokenPrivileges(SafeHandle token, IDictionary state, bool strict = true)
+ {
+ NativeHelpers.LUID_AND_ATTRIBUTES[] privilegeAttr = new NativeHelpers.LUID_AND_ATTRIBUTES[state.Count];
+ int i = 0;
+
+ foreach (DictionaryEntry entry in state)
+ {
+ string key = (string)entry.Key;
+ NativeHelpers.LUID luid;
+ if (!NativeMethods.LookupPrivilegeValue(null, key, out luid))
+ throw new Win32Exception(String.Format("LookupPrivilegeValue({0}) failed", key));
+
+ PrivilegeAttributes attributes;
+ switch ((bool?)entry.Value)
+ {
+ case true:
+ attributes = PrivilegeAttributes.Enabled;
+ break;
+ case false:
+ attributes = PrivilegeAttributes.Disabled;
+ break;
+ default:
+ attributes = PrivilegeAttributes.Removed;
+ break;
+ }
+
+ privilegeAttr[i].Luid = luid;
+ privilegeAttr[i].Attributes = attributes;
+ i++;
+ }
+
+ return AdjustTokenPrivileges(token, privilegeAttr, strict);
+ }
+
+ private static Dictionary<string, bool?> AdjustTokenPrivileges(SafeHandle token, NativeHelpers.LUID_AND_ATTRIBUTES[] newState, bool strict)
+ {
+ bool disableAllPrivileges;
+ SafeMemoryBuffer newStatePtr;
+ NativeHelpers.LUID_AND_ATTRIBUTES[] oldStatePrivileges;
+ UInt32 returnLength;
+
+ if (newState == null)
+ {
+ disableAllPrivileges = true;
+ newStatePtr = new SafeMemoryBuffer(0);
+ }
+ else
+ {
+ disableAllPrivileges = false;
+
+ // Need to manually marshal the bytes required for newState as the constant size
+ // of LUID_AND_ATTRIBUTES is set to 1 and can't be overridden at runtime. TOKEN_PRIVILEGES
+ // always contains at least 1 entry so we need to calculate the extra size if there is
+ // more than 1 LUID_AND_ATTRIBUTES entry
+ int tokenPrivilegesSize = Marshal.SizeOf(typeof(NativeHelpers.TOKEN_PRIVILEGES));
+ int luidAttrSize = 0;
+ if (newState.Length > 1)
+ luidAttrSize = Marshal.SizeOf(typeof(NativeHelpers.LUID_AND_ATTRIBUTES)) * (newState.Length - 1);
+ int totalSize = tokenPrivilegesSize + luidAttrSize;
+ byte[] newStateBytes = new byte[totalSize];
+
+ // get the first entry that includes the struct details
+ NativeHelpers.TOKEN_PRIVILEGES tokenPrivileges = new NativeHelpers.TOKEN_PRIVILEGES()
+ {
+ PrivilegeCount = (UInt32)newState.Length,
+ Privileges = new NativeHelpers.LUID_AND_ATTRIBUTES[1],
+ };
+ if (newState.Length > 0)
+ tokenPrivileges.Privileges[0] = newState[0];
+ int offset = StructureToBytes(tokenPrivileges, newStateBytes, 0);
+
+ // copy the remaining LUID_AND_ATTRIBUTES (if any)
+ for (int i = 1; i < newState.Length; i++)
+ offset += StructureToBytes(newState[i], newStateBytes, offset);
+
+ // finally create the pointer to the byte array we just created
+ newStatePtr = new SafeMemoryBuffer(newStateBytes.Length);
+ Marshal.Copy(newStateBytes, 0, newStatePtr.DangerousGetHandle(), newStateBytes.Length);
+ }
+
+ using (newStatePtr)
+ {
+ SafeNativeHandle hToken;
+ if (!NativeMethods.OpenProcessToken(token, TokenAccessLevels.Query | TokenAccessLevels.AdjustPrivileges, out hToken))
+ throw new Win32Exception("OpenProcessToken() failed with Query and AdjustPrivileges");
+
+ using (hToken)
+ {
+ if (!NativeMethods.AdjustTokenPrivileges(hToken, disableAllPrivileges, newStatePtr, 0, new SafeMemoryBuffer(0), out returnLength))
+ {
+ int errCode = Marshal.GetLastWin32Error();
+ if (errCode != 122) // ERROR_INSUFFICIENT_BUFFER
+ throw new Win32Exception(errCode, "AdjustTokenPrivileges() failed to get old state size");
+ }
+
+ using (SafeMemoryBuffer oldStatePtr = new SafeMemoryBuffer((int)returnLength))
+ {
+ bool res = NativeMethods.AdjustTokenPrivileges(hToken, disableAllPrivileges, newStatePtr, returnLength, oldStatePtr, out returnLength);
+ int errCode = Marshal.GetLastWin32Error();
+
+ // even when res == true, ERROR_NOT_ALL_ASSIGNED may be set as the last error code
+ // fail if we are running with strict, otherwise ignore those privileges
+ if (!res || ((strict && errCode != 0) || (!strict && !(errCode == 0 || errCode == 0x00000514))))
+ throw new Win32Exception(errCode, "AdjustTokenPrivileges() failed");
+
+ // Marshal the oldStatePtr to the struct
+ NativeHelpers.TOKEN_PRIVILEGES oldState = (NativeHelpers.TOKEN_PRIVILEGES)Marshal.PtrToStructure(
+ oldStatePtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_PRIVILEGES));
+ oldStatePrivileges = new NativeHelpers.LUID_AND_ATTRIBUTES[oldState.PrivilegeCount];
+ PtrToStructureArray(oldStatePrivileges, IntPtr.Add(oldStatePtr.DangerousGetHandle(), Marshal.SizeOf(oldState.PrivilegeCount)));
+ }
+ }
+ }
+
+ return oldStatePrivileges.ToDictionary(p => GetPrivilegeName(p.Luid), p => (bool?)p.Attributes.HasFlag(PrivilegeAttributes.Enabled));
+ }
+
+ private static string GetPrivilegeName(NativeHelpers.LUID luid)
+ {
+ UInt32 nameLen = 0;
+ NativeMethods.LookupPrivilegeName(null, ref luid, null, ref nameLen);
+
+ StringBuilder name = new StringBuilder((int)(nameLen + 1));
+ if (!NativeMethods.LookupPrivilegeName(null, ref luid, name, ref nameLen))
+ throw new Win32Exception("LookupPrivilegeName() failed");
+
+ return name.ToString();
+ }
+
+ private static void PtrToStructureArray<T>(T[] array, IntPtr ptr)
+ {
+ IntPtr ptrOffset = ptr;
+ for (int i = 0; i < array.Length; i++, ptrOffset = IntPtr.Add(ptrOffset, Marshal.SizeOf(typeof(T))))
+ array[i] = (T)Marshal.PtrToStructure(ptrOffset, typeof(T));
+ }
+
+ private static int StructureToBytes<T>(T structure, byte[] array, int offset)
+ {
+ int size = Marshal.SizeOf(structure);
+ using (SafeMemoryBuffer structPtr = new SafeMemoryBuffer(size))
+ {
+ Marshal.StructureToPtr(structure, structPtr.DangerousGetHandle(), false);
+ Marshal.Copy(structPtr.DangerousGetHandle(), array, offset, size);
+ }
+
+ return size;
+ }
+ }
+}
+
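A minimal ctypes sketch of the enable/disable semantics that SetTokenPrivileges implements above, assuming the caller adjusts its own process token; the helper name set_privilege is illustrative, not part of module_utils:

    import ctypes
    import ctypes.wintypes as wt

    SE_PRIVILEGE_ENABLED = 0x00000002
    TOKEN_QUERY = 0x0008
    TOKEN_ADJUST_PRIVILEGES = 0x0020

    class LUID(ctypes.Structure):
        _fields_ = [("LowPart", wt.DWORD), ("HighPart", wt.LONG)]

    class LUID_AND_ATTRIBUTES(ctypes.Structure):
        _fields_ = [("Luid", LUID), ("Attributes", wt.DWORD)]

    class TOKEN_PRIVILEGES(ctypes.Structure):
        # Fixed at one entry, like the C# struct; setting several privileges
        # at once needs the manual byte packing done in the C# code above.
        _fields_ = [("PrivilegeCount", wt.DWORD),
                    ("Privileges", LUID_AND_ATTRIBUTES * 1)]

    advapi32 = ctypes.WinDLL("advapi32", use_last_error=True)
    kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)

    def set_privilege(name, enable=True):
        # Open our own token with the same rights the C# helper requests
        token = wt.HANDLE()
        if not advapi32.OpenProcessToken(kernel32.GetCurrentProcess(),
                                         TOKEN_QUERY | TOKEN_ADJUST_PRIVILEGES,
                                         ctypes.byref(token)):
            raise ctypes.WinError(ctypes.get_last_error())
        try:
            luid = LUID()
            if not advapi32.LookupPrivilegeValueW(None, name, ctypes.byref(luid)):
                raise ctypes.WinError(ctypes.get_last_error())
            tp = TOKEN_PRIVILEGES(1, (LUID_AND_ATTRIBUTES * 1)(
                LUID_AND_ATTRIBUTES(luid, SE_PRIVILEGE_ENABLED if enable else 0)))
            advapi32.AdjustTokenPrivileges(token, False, ctypes.byref(tp),
                                           0, None, None)
            # Like strict=True above: ERROR_NOT_ALL_ASSIGNED still fails
            if ctypes.get_last_error():
                raise ctypes.WinError(ctypes.get_last_error())
        finally:
            kernel32.CloseHandle(token)

    # e.g. set_privilege("SeDebugPrivilege"), assuming the token holds it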
diff --git a/lib/ansible/module_utils/csharp/Ansible.Process.cs b/lib/ansible/module_utils/csharp/Ansible.Process.cs
new file mode 100644
index 0000000..f4c68f0
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/Ansible.Process.cs
@@ -0,0 +1,461 @@
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Collections;
+using System.IO;
+using System.Linq;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading;
+
+namespace Ansible.Process
+{
+ internal class NativeHelpers
+ {
+ [StructLayout(LayoutKind.Sequential)]
+ public class SECURITY_ATTRIBUTES
+ {
+ public UInt32 nLength;
+ public IntPtr lpSecurityDescriptor;
+ public bool bInheritHandle = false;
+ public SECURITY_ATTRIBUTES()
+ {
+ nLength = (UInt32)Marshal.SizeOf(this);
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public class STARTUPINFO
+ {
+ public UInt32 cb;
+ public IntPtr lpReserved;
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpDesktop;
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpTitle;
+ public UInt32 dwX;
+ public UInt32 dwY;
+ public UInt32 dwXSize;
+ public UInt32 dwYSize;
+ public UInt32 dwXCountChars;
+ public UInt32 dwYCountChars;
+ public UInt32 dwFillAttribute;
+ public StartupInfoFlags dwFlags;
+ public UInt16 wShowWindow;
+ public UInt16 cbReserved2;
+ public IntPtr lpReserved2;
+ public SafeFileHandle hStdInput;
+ public SafeFileHandle hStdOutput;
+ public SafeFileHandle hStdError;
+ public STARTUPINFO()
+ {
+ cb = (UInt32)Marshal.SizeOf(this);
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public class STARTUPINFOEX
+ {
+ public STARTUPINFO startupInfo;
+ public IntPtr lpAttributeList;
+ public STARTUPINFOEX()
+ {
+ startupInfo = new STARTUPINFO();
+ startupInfo.cb = (UInt32)Marshal.SizeOf(this);
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct PROCESS_INFORMATION
+ {
+ public IntPtr hProcess;
+ public IntPtr hThread;
+ public int dwProcessId;
+ public int dwThreadId;
+ }
+
+ [Flags]
+ public enum ProcessCreationFlags : uint
+ {
+ CREATE_NEW_CONSOLE = 0x00000010,
+ CREATE_UNICODE_ENVIRONMENT = 0x00000400,
+ EXTENDED_STARTUPINFO_PRESENT = 0x00080000
+ }
+
+ [Flags]
+ public enum StartupInfoFlags : uint
+ {
+ USESTDHANDLES = 0x00000100
+ }
+
+ [Flags]
+ public enum HandleFlags : uint
+ {
+ None = 0,
+ INHERIT = 1
+ }
+ }
+
+ internal class NativeMethods
+ {
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool AllocConsole();
+
+ [DllImport("shell32.dll", SetLastError = true)]
+ public static extern SafeMemoryBuffer CommandLineToArgvW(
+ [MarshalAs(UnmanagedType.LPWStr)] string lpCmdLine,
+ out int pNumArgs);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool CreatePipe(
+ out SafeFileHandle hReadPipe,
+ out SafeFileHandle hWritePipe,
+ NativeHelpers.SECURITY_ATTRIBUTES lpPipeAttributes,
+ UInt32 nSize);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool CreateProcessW(
+ [MarshalAs(UnmanagedType.LPWStr)] string lpApplicationName,
+ StringBuilder lpCommandLine,
+ IntPtr lpProcessAttributes,
+ IntPtr lpThreadAttributes,
+ bool bInheritHandles,
+ NativeHelpers.ProcessCreationFlags dwCreationFlags,
+ SafeMemoryBuffer lpEnvironment,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpCurrentDirectory,
+ NativeHelpers.STARTUPINFOEX lpStartupInfo,
+ out NativeHelpers.PROCESS_INFORMATION lpProcessInformation);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool FreeConsole();
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern IntPtr GetConsoleWindow();
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool GetExitCodeProcess(
+ SafeWaitHandle hProcess,
+ out UInt32 lpExitCode);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern uint SearchPathW(
+ [MarshalAs(UnmanagedType.LPWStr)] string lpPath,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpFileName,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpExtension,
+ UInt32 nBufferLength,
+ [MarshalAs(UnmanagedType.LPTStr)] StringBuilder lpBuffer,
+ out IntPtr lpFilePart);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool SetConsoleCP(
+ UInt32 wCodePageID);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool SetConsoleOutputCP(
+ UInt32 wCodePageID);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool SetHandleInformation(
+ SafeFileHandle hObject,
+ NativeHelpers.HandleFlags dwMask,
+ NativeHelpers.HandleFlags dwFlags);
+
+ [DllImport("kernel32.dll")]
+ public static extern UInt32 WaitForSingleObject(
+ SafeWaitHandle hHandle,
+ UInt32 dwMilliseconds);
+ }
+
+ internal class SafeMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeMemoryBuffer() : base(true) { }
+ public SafeMemoryBuffer(int cb) : base(true)
+ {
+ base.SetHandle(Marshal.AllocHGlobal(cb));
+ }
+ public SafeMemoryBuffer(IntPtr handle) : base(true)
+ {
+ base.SetHandle(handle);
+ }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ Marshal.FreeHGlobal(handle);
+ return true;
+ }
+ }
+
+ public class Win32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+
+ public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+ public Win32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode);
+ }
+
+ public override string Message { get { return _msg; } }
+ public static explicit operator Win32Exception(string message) { return new Win32Exception(message); }
+ }
+
+ public class Result
+ {
+ public string StandardOut { get; internal set; }
+ public string StandardError { get; internal set; }
+ public uint ExitCode { get; internal set; }
+ }
+
+ public class ProcessUtil
+ {
+ /// <summary>
+ /// Parses a command line string into an argv array according to the Windows rules
+ /// </summary>
+ /// <param name="lpCommandLine">The command line to parse</param>
+ /// <returns>An array of arguments interpreted by Windows</returns>
+ public static string[] ParseCommandLine(string lpCommandLine)
+ {
+ int numArgs;
+ using (SafeMemoryBuffer buf = NativeMethods.CommandLineToArgvW(lpCommandLine, out numArgs))
+ {
+ if (buf.IsInvalid)
+ throw new Win32Exception("Error parsing command line");
+ IntPtr[] strptrs = new IntPtr[numArgs];
+ Marshal.Copy(buf.DangerousGetHandle(), strptrs, 0, numArgs);
+ return strptrs.Select(s => Marshal.PtrToStringUni(s)).ToArray();
+ }
+ }
+
+ /// <summary>
+ /// Searches the path for the specified executable. Throws a FileNotFoundException if the file cannot be found.
+ /// </summary>
+ /// <param name="lpFileName">The executable to search for</param>
+ /// <returns>The full path of the executable that was found</returns>
+ public static string SearchPath(string lpFileName)
+ {
+ StringBuilder sbOut = new StringBuilder(0);
+ IntPtr filePartOut = IntPtr.Zero;
+ UInt32 res = NativeMethods.SearchPathW(null, lpFileName, null, (UInt32)sbOut.Capacity, sbOut, out filePartOut);
+ if (res == 0)
+ {
+ int lastErr = Marshal.GetLastWin32Error();
+ if (lastErr == 2) // ERROR_FILE_NOT_FOUND
+ throw new FileNotFoundException(String.Format("Could not find file '{0}'.", lpFileName));
+ else
+ throw new Win32Exception(String.Format("SearchPathW({0}) failed to get buffer length", lpFileName));
+ }
+
+ sbOut.EnsureCapacity((int)res);
+ if (NativeMethods.SearchPathW(null, lpFileName, null, (UInt32)sbOut.Capacity, sbOut, out filePartOut) == 0)
+ throw new Win32Exception(String.Format("SearchPathW({0}) failed", lpFileName));
+
+ return sbOut.ToString();
+ }
+
+ public static Result CreateProcess(string command)
+ {
+ return CreateProcess(null, command, null, null, String.Empty);
+ }
+
+ public static Result CreateProcess(string lpApplicationName, string lpCommandLine, string lpCurrentDirectory,
+ IDictionary environment)
+ {
+ return CreateProcess(lpApplicationName, lpCommandLine, lpCurrentDirectory, environment, String.Empty);
+ }
+
+ public static Result CreateProcess(string lpApplicationName, string lpCommandLine, string lpCurrentDirectory,
+ IDictionary environment, string stdin)
+ {
+ return CreateProcess(lpApplicationName, lpCommandLine, lpCurrentDirectory, environment, stdin, null);
+ }
+
+ public static Result CreateProcess(string lpApplicationName, string lpCommandLine, string lpCurrentDirectory,
+ IDictionary environment, byte[] stdin)
+ {
+ return CreateProcess(lpApplicationName, lpCommandLine, lpCurrentDirectory, environment, stdin, null);
+ }
+
+ public static Result CreateProcess(string lpApplicationName, string lpCommandLine, string lpCurrentDirectory,
+ IDictionary environment, string stdin, string outputEncoding)
+ {
+ byte[] stdinBytes;
+ if (String.IsNullOrEmpty(stdin))
+ stdinBytes = new byte[0];
+ else
+ {
+ if (!stdin.EndsWith(Environment.NewLine))
+ stdin += Environment.NewLine;
+ stdinBytes = new UTF8Encoding(false).GetBytes(stdin);
+ }
+ return CreateProcess(lpApplicationName, lpCommandLine, lpCurrentDirectory, environment, stdinBytes, outputEncoding);
+ }
+
+ /// <summary>
+ /// Creates a process based on the CreateProcess API call.
+ /// </summary>
+ /// <param name="lpApplicationName">The name of the executable or batch file to execute</param>
+ /// <param name="lpCommandLine">The command line to execute; typically this includes lpApplicationName as the first argument</param>
+ /// <param name="lpCurrentDirectory">The full path to the working directory for the process; null inherits the cwd of the calling process</param>
+ /// <param name="environment">A dictionary of key/value pairs to define the new process environment</param>
+ /// <param name="stdin">A byte array to send over the stdin pipe</param>
+ /// <param name="outputEncoding">The character encoding for decoding stdout/stderr output of the process.</param>
+ /// <returns>Result object that contains the command output and return code</returns>
+ public static Result CreateProcess(string lpApplicationName, string lpCommandLine, string lpCurrentDirectory,
+ IDictionary environment, byte[] stdin, string outputEncoding)
+ {
+ NativeHelpers.ProcessCreationFlags creationFlags = NativeHelpers.ProcessCreationFlags.CREATE_UNICODE_ENVIRONMENT |
+ NativeHelpers.ProcessCreationFlags.EXTENDED_STARTUPINFO_PRESENT;
+ NativeHelpers.PROCESS_INFORMATION pi = new NativeHelpers.PROCESS_INFORMATION();
+ NativeHelpers.STARTUPINFOEX si = new NativeHelpers.STARTUPINFOEX();
+ si.startupInfo.dwFlags = NativeHelpers.StartupInfoFlags.USESTDHANDLES;
+
+ SafeFileHandle stdoutRead, stdoutWrite, stderrRead, stderrWrite, stdinRead, stdinWrite;
+ CreateStdioPipes(si, out stdoutRead, out stdoutWrite, out stderrRead, out stderrWrite, out stdinRead,
+ out stdinWrite);
+ FileStream stdinStream = new FileStream(stdinWrite, FileAccess.Write);
+
+ // $null from PowerShell ends up as an empty string; convert back to null because
+ // an empty string doesn't make sense for these parameters
+ if (lpApplicationName == "")
+ lpApplicationName = null;
+
+ if (lpCurrentDirectory == "")
+ lpCurrentDirectory = null;
+
+ using (SafeMemoryBuffer lpEnvironment = CreateEnvironmentPointer(environment))
+ {
+ // Create console with utf-8 CP if no existing console is present
+ bool isConsole = false;
+ if (NativeMethods.GetConsoleWindow() == IntPtr.Zero)
+ {
+ isConsole = NativeMethods.AllocConsole();
+
+ // Set console input/output codepage to UTF-8
+ NativeMethods.SetConsoleCP(65001);
+ NativeMethods.SetConsoleOutputCP(65001);
+ }
+
+ try
+ {
+ StringBuilder commandLine = new StringBuilder(lpCommandLine);
+ if (!NativeMethods.CreateProcessW(lpApplicationName, commandLine, IntPtr.Zero, IntPtr.Zero,
+ true, creationFlags, lpEnvironment, lpCurrentDirectory, si, out pi))
+ {
+ throw new Win32Exception("CreateProcessW() failed");
+ }
+ }
+ finally
+ {
+ if (isConsole)
+ NativeMethods.FreeConsole();
+ }
+ }
+
+ return WaitProcess(stdoutRead, stdoutWrite, stderrRead, stderrWrite, stdinStream, stdin, pi.hProcess,
+ outputEncoding);
+ }
+
+ internal static void CreateStdioPipes(NativeHelpers.STARTUPINFOEX si, out SafeFileHandle stdoutRead,
+ out SafeFileHandle stdoutWrite, out SafeFileHandle stderrRead, out SafeFileHandle stderrWrite,
+ out SafeFileHandle stdinRead, out SafeFileHandle stdinWrite)
+ {
+ NativeHelpers.SECURITY_ATTRIBUTES pipesec = new NativeHelpers.SECURITY_ATTRIBUTES();
+ pipesec.bInheritHandle = true;
+
+ if (!NativeMethods.CreatePipe(out stdoutRead, out stdoutWrite, pipesec, 0))
+ throw new Win32Exception("STDOUT pipe setup failed");
+ if (!NativeMethods.SetHandleInformation(stdoutRead, NativeHelpers.HandleFlags.INHERIT, 0))
+ throw new Win32Exception("STDOUT pipe handle setup failed");
+
+ if (!NativeMethods.CreatePipe(out stderrRead, out stderrWrite, pipesec, 0))
+ throw new Win32Exception("STDERR pipe setup failed");
+ if (!NativeMethods.SetHandleInformation(stderrRead, NativeHelpers.HandleFlags.INHERIT, 0))
+ throw new Win32Exception("STDERR pipe handle setup failed");
+
+ if (!NativeMethods.CreatePipe(out stdinRead, out stdinWrite, pipesec, 0))
+ throw new Win32Exception("STDIN pipe setup failed");
+ if (!NativeMethods.SetHandleInformation(stdinWrite, NativeHelpers.HandleFlags.INHERIT, 0))
+ throw new Win32Exception("STDIN pipe handle setup failed");
+
+ si.startupInfo.hStdOutput = stdoutWrite;
+ si.startupInfo.hStdError = stderrWrite;
+ si.startupInfo.hStdInput = stdinRead;
+ }
+
+ internal static SafeMemoryBuffer CreateEnvironmentPointer(IDictionary environment)
+ {
+ IntPtr lpEnvironment = IntPtr.Zero;
+ if (environment != null && environment.Count > 0)
+ {
+ StringBuilder environmentString = new StringBuilder();
+ foreach (DictionaryEntry kv in environment)
+ environmentString.AppendFormat("{0}={1}\0", kv.Key, kv.Value);
+ environmentString.Append('\0');
+
+ lpEnvironment = Marshal.StringToHGlobalUni(environmentString.ToString());
+ }
+ return new SafeMemoryBuffer(lpEnvironment);
+ }
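The block built here follows the standard CreateProcessW layout: one NUL after each "key=value" entry and a second NUL terminating the whole block. A short Python sketch of the same layout (the function name is illustrative):

    def build_environment_block(env):
        # NUL-terminate each entry, then terminate the whole block with
        # one extra NUL, as CreateProcessW expects
        return u"".join(u"%s=%s\0" % (k, v) for k, v in env.items()) + u"\0"

    assert build_environment_block({"FOO": "bar"}) == u"FOO=bar\0\0"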
+
+ internal static Result WaitProcess(SafeFileHandle stdoutRead, SafeFileHandle stdoutWrite, SafeFileHandle stderrRead,
+ SafeFileHandle stderrWrite, FileStream stdinStream, byte[] stdin, IntPtr hProcess, string outputEncoding = null)
+ {
+ // Default to using UTF-8 as the output encoding, this should be a sane default for most scenarios.
+ outputEncoding = String.IsNullOrEmpty(outputEncoding) ? "utf-8" : outputEncoding;
+ Encoding encodingInstance = Encoding.GetEncoding(outputEncoding);
+
+ FileStream stdoutFS = new FileStream(stdoutRead, FileAccess.Read, 4096);
+ StreamReader stdout = new StreamReader(stdoutFS, encodingInstance, true, 4096);
+ stdoutWrite.Close();
+
+ FileStream stderrFS = new FileStream(stderrRead, FileAccess.Read, 4096);
+ StreamReader stderr = new StreamReader(stderrFS, encodingInstance, true, 4096);
+ stderrWrite.Close();
+
+ stdinStream.Write(stdin, 0, stdin.Length);
+ stdinStream.Close();
+
+ string stdoutStr, stderrStr;
+ GetProcessOutput(stdout, stderr, out stdoutStr, out stderrStr);
+ UInt32 rc = GetProcessExitCode(hProcess);
+
+ return new Result
+ {
+ StandardOut = stdoutStr,
+ StandardError = stderrStr,
+ ExitCode = rc
+ };
+ }
+
+ internal static void GetProcessOutput(StreamReader stdoutStream, StreamReader stderrStream, out string stdout, out string stderr)
+ {
+ var sowait = new EventWaitHandle(false, EventResetMode.ManualReset);
+ var sewait = new EventWaitHandle(false, EventResetMode.ManualReset);
+ string so = null, se = null;
+ ThreadPool.QueueUserWorkItem((s) =>
+ {
+ so = stdoutStream.ReadToEnd();
+ sowait.Set();
+ });
+ ThreadPool.QueueUserWorkItem((s) =>
+ {
+ se = stderrStream.ReadToEnd();
+ sewait.Set();
+ });
+ foreach (var wh in new WaitHandle[] { sowait, sewait })
+ wh.WaitOne();
+ stdout = so;
+ stderr = se;
+ }
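The two worker threads here avoid the classic pipe deadlock: reading stdout to EOF before touching stderr can hang once the child fills the stderr buffer. A rough Python equivalent of the same drain-in-parallel pattern (names are illustrative):

    import threading

    def read_both(stdout, stderr):
        out, err = [], []

        def drain(stream, sink):
            # A dedicated reader per pipe so neither can back up and
            # stall the child process
            sink.append(stream.read())

        threads = [threading.Thread(target=drain, args=(stdout, out)),
                   threading.Thread(target=drain, args=(stderr, err))]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        return out[0], err[0]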
+
+ internal static UInt32 GetProcessExitCode(IntPtr processHandle)
+ {
+ SafeWaitHandle hProcess = new SafeWaitHandle(processHandle, true);
+ NativeMethods.WaitForSingleObject(hProcess, 0xFFFFFFFF);
+
+ UInt32 exitCode;
+ if (!NativeMethods.GetExitCodeProcess(hProcess, out exitCode))
+ throw new Win32Exception("GetExitCodeProcess() failed");
+ return exitCode;
+ }
+ }
+}
diff --git a/lib/ansible/module_utils/csharp/__init__.py b/lib/ansible/module_utils/csharp/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/__init__.py
diff --git a/lib/ansible/module_utils/distro/__init__.py b/lib/ansible/module_utils/distro/__init__.py
new file mode 100644
index 0000000..b70f29c
--- /dev/null
+++ b/lib/ansible/module_utils/distro/__init__.py
@@ -0,0 +1,56 @@
+# (c) 2018 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat distro library.
+'''
+# The following makes it easier for us to script updates of the bundled code
+_BUNDLED_METADATA = {"pypi_name": "distro", "version": "1.6.0"}
+
+# The following additional changes have been made:
+# * Remove optparse since it is not needed for our use.
+# * A format string including {} has been changed to {0} (py2.6 compat)
+# * Port two calls from subprocess.check_output to subprocess.Popen().communicate() (py2.6 compat)
+
+
+import sys
+import types
+
+try:
+ import distro as _system_distro
+except ImportError:
+ _system_distro = None
+else:
+ # There could be a 'distro' package/module on the PYTHONPATH that isn't what
+ # we expect. Rather than erroring out in this case, just fall back to ours.
+ # We require more functions than distro.id(), but this is probably a decent
+ # test that we have something we can reasonably use.
+ if not hasattr(_system_distro, 'id') or \
+ not isinstance(_system_distro.id, types.FunctionType):
+ _system_distro = None
+
+if _system_distro:
+ distro = _system_distro
+else:
+ # Our bundled copy
+ from ansible.module_utils.distro import _distro as distro
+
+sys.modules['ansible.module_utils.distro'] = distro
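Because of the sys.modules assignment above, callers import this package as a drop-in for the distro API; a short usage sketch (the printed values are examples only):

    from ansible.module_utils import distro

    print(distro.id())        # e.g. 'centos'
    print(distro.version())   # e.g. '7.1.1503'
    print(distro.like())      # e.g. 'rhel fedora'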
diff --git a/lib/ansible/module_utils/distro/_distro.py b/lib/ansible/module_utils/distro/_distro.py
new file mode 100644
index 0000000..58e41d4
--- /dev/null
+++ b/lib/ansible/module_utils/distro/_distro.py
@@ -0,0 +1,1416 @@
+# Copyright 2015,2016,2017 Nir Cohen
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# A local copy of the license can be found in licenses/Apache-License.txt
+#
+# Modifications to this code have been made by Ansible Project
+
+"""
+The ``distro`` package (``distro`` stands for Linux Distribution) provides
+information about the Linux distribution it runs on, such as a reliable
+machine-readable distro ID, or version information.
+
+It is the recommended replacement for Python's original
+:py:func:`platform.linux_distribution` function, but it provides much more
+functionality. An alternative implementation became necessary because Python
+3.5 deprecated this function, and Python 3.8 removed it altogether. Its
+predecessor function :py:func:`platform.dist` was already deprecated since
+Python 2.6 and removed in Python 3.8. Still, there are many cases in which
+access to OS distribution information is needed. See `Python issue 1322
+<https://bugs.python.org/issue1322>`_ for more information.
+"""
+
+import logging
+import os
+import re
+import shlex
+import subprocess
+import sys
+import warnings
+
+__version__ = "1.6.0"
+
+# Use `if False` to avoid an ImportError on Python 2. After dropping Python 2
+# support, can use typing.TYPE_CHECKING instead. See:
+# https://docs.python.org/3/library/typing.html#typing.TYPE_CHECKING
+if False: # pragma: nocover
+ from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ Optional,
+ Sequence,
+ TextIO,
+ Tuple,
+ Type,
+ TypedDict,
+ Union,
+ )
+
+ VersionDict = TypedDict(
+ "VersionDict", {"major": str, "minor": str, "build_number": str}
+ )
+ InfoDict = TypedDict(
+ "InfoDict",
+ {
+ "id": str,
+ "version": str,
+ "version_parts": VersionDict,
+ "like": str,
+ "codename": str,
+ },
+ )
+
+
+_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc")
+_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib")
+_OS_RELEASE_BASENAME = "os-release"
+
+#: Translation table for normalizing the "ID" attribute defined in os-release
+#: files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as defined in the os-release file, translated to lower case,
+#: with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_OS_ID = {
+ "ol": "oracle", # Oracle Linux
+ "opensuse-leap": "opensuse", # Newer versions of openSUSE report as opensuse-leap
+}
+
+#: Translation table for normalizing the "Distributor ID" attribute returned by
+#: the lsb_release command, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as returned by the lsb_release command, translated to lower
+#: case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_LSB_ID = {
+ "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4
+ "enterpriseenterpriseserver": "oracle", # Oracle Linux 5
+ "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation
+ "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server
+ "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode
+}
+
+#: Translation table for normalizing the distro ID derived from the file name
+#: of distro release files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as derived from the file name of a distro release file,
+#: translated to lower case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_DISTRO_ID = {
+ "redhat": "rhel", # RHEL 6.x, 7.x
+}
+
+# Pattern for content of distro release file (reversed)
+_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
+ r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)"
+)
+
+# Pattern for base file name of distro release file
+_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$")
+
+# Base file names to be ignored when searching for distro release file
+_DISTRO_RELEASE_IGNORE_BASENAMES = (
+ "debian_version",
+ "lsb-release",
+ "oem-release",
+ _OS_RELEASE_BASENAME,
+ "system-release",
+ "plesk-release",
+ "iredmail-release",
+)
+
+
+#
+# Python 2.6 does not have subprocess.check_output so replicate it here
+#
+def _my_check_output(*popenargs, **kwargs):
+ r"""Run command with arguments and return its output as a byte string.
+
+ If the exit code was non-zero it raises a CalledProcessError. The
+ CalledProcessError object will have the return code in the returncode
+ attribute and output in the output attribute.
+
+ The arguments are the same as for the Popen constructor. Example:
+
+ >>> check_output(["ls", "-l", "/dev/null"])
+ 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
+
+ The stdout argument is not allowed as it is used internally.
+ To capture standard error in the result, use stderr=STDOUT.
+
+ >>> check_output(["/bin/sh", "-c",
+ ... "ls -l non_existent_file ; exit 0"],
+ ... stderr=STDOUT)
+ 'ls: non_existent_file: No such file or directory\n'
+
+ This is a backport of Python-2.7's check output to Python-2.6
+ """
+ if 'stdout' in kwargs:
+ raise ValueError(
+ 'stdout argument not allowed, it will be overridden.'
+ )
+ process = subprocess.Popen(
+ stdout=subprocess.PIPE, *popenargs, **kwargs
+ )
+ output, unused_err = process.communicate()
+ retcode = process.poll()
+ if retcode:
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ # Deviation from Python-2.7: Python-2.6's CalledProcessError does not
+ # have an argument for the stdout so simply omit it.
+ raise subprocess.CalledProcessError(retcode, cmd)
+ return output
+
+
+try:
+ _check_output = subprocess.check_output
+except AttributeError:
+ _check_output = _my_check_output
+
+
+def linux_distribution(full_distribution_name=True):
+ # type: (bool) -> Tuple[str, str, str]
+ """
+ .. deprecated:: 1.6.0
+
+ :func:`distro.linux_distribution()` is deprecated. It should only be
+ used as a compatibility shim with Python's
+ :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`,
+ :func:`distro.version` and :func:`distro.name` instead.
+
+ Return information about the current OS distribution as a tuple
+ ``(id_name, version, codename)`` with items as follows:
+
+ * ``id_name``: If *full_distribution_name* is false, the result of
+ :func:`distro.id`. Otherwise, the result of :func:`distro.name`.
+
+ * ``version``: The result of :func:`distro.version`.
+
+ * ``codename``: The result of :func:`distro.codename`.
+
+ The interface of this function is compatible with the original
+ :py:func:`platform.linux_distribution` function, supporting a subset of
+ its parameters.
+
+ The data it returns may not exactly be the same, because it uses more data
+ sources than the original function, and that may lead to different data if
+ the OS distribution is not consistent across multiple data sources it
+ provides (there are indeed such distributions ...).
+
+ Another reason for differences is the fact that the :func:`distro.id`
+ method normalizes the distro ID string to a reliable machine-readable value
+ for a number of popular OS distributions.
+ """
+ warnings.warn(
+ "distro.linux_distribution() is deprecated. It should only be used as a "
+ "compatibility shim with Python's platform.linux_distribution(). Please use "
+ "distro.id(), distro.version() and distro.name() instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _distro.linux_distribution(full_distribution_name)
+
+
+def id():
+ # type: () -> str
+ """
+ Return the distro ID of the current distribution, as a
+ machine-readable string.
+
+ For a number of OS distributions, the returned distro ID value is
+ *reliable*, in the sense that it is documented and that it does not change
+ across releases of the distribution.
+
+ This package maintains the following reliable distro ID values:
+
+ ============== =========================================
+ Distro ID Distribution
+ ============== =========================================
+ "ubuntu" Ubuntu
+ "debian" Debian
+ "rhel" RedHat Enterprise Linux
+ "centos" CentOS
+ "fedora" Fedora
+ "sles" SUSE Linux Enterprise Server
+ "opensuse" openSUSE
+ "amazon" Amazon Linux
+ "arch" Arch Linux
+ "cloudlinux" CloudLinux OS
+ "exherbo" Exherbo Linux
+ "gentoo" Gentoo Linux
+ "ibm_powerkvm" IBM PowerKVM
+ "kvmibm" KVM for IBM z Systems
+ "linuxmint" Linux Mint
+ "mageia" Mageia
+ "mandriva" Mandriva Linux
+ "parallels" Parallels
+ "pidora" Pidora
+ "raspbian" Raspbian
+ "oracle" Oracle Linux (and Oracle Enterprise Linux)
+ "scientific" Scientific Linux
+ "slackware" Slackware
+ "xenserver" XenServer
+ "openbsd" OpenBSD
+ "netbsd" NetBSD
+ "freebsd" FreeBSD
+ "midnightbsd" MidnightBSD
+ ============== =========================================
+
+ If you have a need to get distros for reliable IDs added into this set,
+ or if you find that the :func:`distro.id` function returns a different
+ distro ID for one of the listed distros, please create an issue in the
+ `distro issue tracker`_.
+
+ **Lookup hierarchy and transformations:**
+
+ First, the ID is obtained from the following sources, in the specified
+ order. The first available and non-empty value is used:
+
+ * the value of the "ID" attribute of the os-release file,
+
+ * the value of the "Distributor ID" attribute returned by the lsb_release
+ command,
+
+ * the first part of the file name of the distro release file,
+
+ The so determined ID value then passes the following transformations,
+ before it is returned by this method:
+
+ * it is translated to lower case,
+
+ * blanks (which should not be there anyway) are translated to underscores,
+
+ * a normalization of the ID is performed, based upon
+ `normalization tables`_. The purpose of this normalization is to ensure
+ that the ID is as reliable as possible, even across incompatible changes
+ in the OS distributions. A common reason for an incompatible change is
+ the addition of an os-release file, or the addition of the lsb_release
+ command, with ID values that differ from what was previously determined
+ from the distro release file name.
+ """
+ return _distro.id()
+
+
+def name(pretty=False):
+ # type: (bool) -> str
+ """
+ Return the name of the current OS distribution, as a human-readable
+ string.
+
+ If *pretty* is false, the name is returned without version or codename.
+ (e.g. "CentOS Linux")
+
+ If *pretty* is true, the version and codename are appended.
+ (e.g. "CentOS Linux 7.1.1503 (Core)")
+
+ **Lookup hierarchy:**
+
+ The name is obtained from the following sources, in the specified order.
+ The first available and non-empty value is used:
+
+ * If *pretty* is false:
+
+ - the value of the "NAME" attribute of the os-release file,
+
+ - the value of the "Distributor ID" attribute returned by the lsb_release
+ command,
+
+ - the value of the "<name>" field of the distro release file.
+
+ * If *pretty* is true:
+
+ - the value of the "PRETTY_NAME" attribute of the os-release file,
+
+ - the value of the "Description" attribute returned by the lsb_release
+ command,
+
+ - the value of the "<name>" field of the distro release file, appended
+ with the value of the pretty version ("<version_id>" and "<codename>"
+ fields) of the distro release file, if available.
+ """
+ return _distro.name(pretty)
+
+
+def version(pretty=False, best=False):
+ # type: (bool, bool) -> str
+ """
+ Return the version of the current OS distribution, as a human-readable
+ string.
+
+ If *pretty* is false, the version is returned without codename (e.g.
+ "7.0").
+
+ If *pretty* is true, the codename in parenthesis is appended, if the
+ codename is non-empty (e.g. "7.0 (Maipo)").
+
+ Some distributions provide version numbers with different precisions in
+ the different sources of distribution information. Examining the different
+ sources in a fixed priority order does not always yield the most precise
+ version (e.g. for Debian 8.2, or CentOS 7.1).
+
+ The *best* parameter can be used to control the approach for the returned
+ version:
+
+ If *best* is false, the first non-empty version number in priority order of
+ the examined sources is returned.
+
+ If *best* is true, the most precise version number out of all examined
+ sources is returned.
+
+ **Lookup hierarchy:**
+
+ In all cases, the version number is obtained from the following sources.
+ If *best* is false, this order represents the priority order:
+
+ * the value of the "VERSION_ID" attribute of the os-release file,
+ * the value of the "Release" attribute returned by the lsb_release
+ command,
+ * the version number parsed from the "<version_id>" field of the first line
+ of the distro release file,
+ * the version number parsed from the "PRETTY_NAME" attribute of the
+ os-release file, if it follows the format of the distro release files.
+ * the version number parsed from the "Description" attribute returned by
+ the lsb_release command, if it follows the format of the distro release
+ files.
+ """
+ return _distro.version(pretty, best)
+
+
+def version_parts(best=False):
+ # type: (bool) -> Tuple[str, str, str]
+ """
+ Return the version of the current OS distribution as a tuple
+ ``(major, minor, build_number)`` with items as follows:
+
+ * ``major``: The result of :func:`distro.major_version`.
+
+ * ``minor``: The result of :func:`distro.minor_version`.
+
+ * ``build_number``: The result of :func:`distro.build_number`.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.version_parts(best)
+
+
+def major_version(best=False):
+ # type: (bool) -> str
+ """
+ Return the major version of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The major version is the first
+ part of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.major_version(best)
+
+
+def minor_version(best=False):
+ # type: (bool) -> str
+ """
+ Return the minor version of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The minor version is the second
+ part of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.minor_version(best)
+
+
+def build_number(best=False):
+ # type: (bool) -> str
+ """
+ Return the build number of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The build number is the third part
+ of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.build_number(best)
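Taken together, these accessors simply split the dotted version string; for example, on the CentOS Linux 7.1.1503 release used as an example in the docstrings above, one would expect:

    import distro

    distro.version()        # -> '7.1.1503'
    distro.version_parts()  # -> ('7', '1', '1503')
    distro.major_version()  # -> '7'
    distro.minor_version()  # -> '1'
    distro.build_number()   # -> '1503'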
+
+
+def like():
+ # type: () -> str
+ """
+ Return a space-separated list of distro IDs of distributions that are
+ closely related to the current OS distribution in regards to packaging
+ and programming interfaces, for example distributions the current
+ distribution is a derivative from.
+
+ **Lookup hierarchy:**
+
+ This information item is only provided by the os-release file.
+ For details, see the description of the "ID_LIKE" attribute in the
+ `os-release man page
+ <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
+ """
+ return _distro.like()
+
+
+def codename():
+ # type: () -> str
+ """
+ Return the codename for the release of the current OS distribution,
+ as a string.
+
+ If the distribution does not have a codename, an empty string is returned.
+
+ Note that the returned codename is not always really a codename. For
+ example, openSUSE returns "x86_64". This function does not handle such
+ cases in any special way and just returns the string it finds, if any.
+
+ **Lookup hierarchy:**
+
+ * the codename within the "VERSION" attribute of the os-release file, if
+ provided,
+
+ * the value of the "Codename" attribute returned by the lsb_release
+ command,
+
+ * the value of the "<codename>" field of the distro release file.
+ """
+ return _distro.codename()
+
+
+def info(pretty=False, best=False):
+ # type: (bool, bool) -> InfoDict
+ """
+ Return certain machine-readable information items about the current OS
+ distribution in a dictionary, as shown in the following example:
+
+ .. sourcecode:: python
+
+ {
+ 'id': 'rhel',
+ 'version': '7.0',
+ 'version_parts': {
+ 'major': '7',
+ 'minor': '0',
+ 'build_number': ''
+ },
+ 'like': 'fedora',
+ 'codename': 'Maipo'
+ }
+
+ The dictionary structure and keys are always the same, regardless of which
+ information items are available in the underlying data sources. The values
+ for the various keys are as follows:
+
+ * ``id``: The result of :func:`distro.id`.
+
+ * ``version``: The result of :func:`distro.version`.
+
+ * ``version_parts -> major``: The result of :func:`distro.major_version`.
+
+ * ``version_parts -> minor``: The result of :func:`distro.minor_version`.
+
+ * ``version_parts -> build_number``: The result of
+ :func:`distro.build_number`.
+
+ * ``like``: The result of :func:`distro.like`.
+
+ * ``codename``: The result of :func:`distro.codename`.
+
+ For a description of the *pretty* and *best* parameters, see the
+ :func:`distro.version` method.
+ """
+ return _distro.info(pretty, best)
+
+
+def os_release_info():
+ # type: () -> Dict[str, str]
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the os-release file data source of the current OS distribution.
+
+ See `os-release file`_ for details about these information items.
+ """
+ return _distro.os_release_info()
+
+
+def lsb_release_info():
+ # type: () -> Dict[str, str]
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the lsb_release command data source of the current OS distribution.
+
+ See `lsb_release command output`_ for details about these information
+ items.
+ """
+ return _distro.lsb_release_info()
+
+
+def distro_release_info():
+ # type: () -> Dict[str, str]
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the distro release file data source of the current OS distribution.
+
+ See `distro release file`_ for details about these information items.
+ """
+ return _distro.distro_release_info()
+
+
+def uname_info():
+ # type: () -> Dict[str, str]
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the distro release file data source of the current OS distribution.
+ """
+ return _distro.uname_info()
+
+
+def os_release_attr(attribute):
+ # type: (str) -> str
+ """
+ Return a single named information item from the os-release file data source
+ of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `os-release file`_ for details about these information items.
+ """
+ return _distro.os_release_attr(attribute)
+
+
+def lsb_release_attr(attribute):
+ # type: (str) -> str
+ """
+ Return a single named information item from the lsb_release command output
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `lsb_release command output`_ for details about these information
+ items.
+ """
+ return _distro.lsb_release_attr(attribute)
+
+
+def distro_release_attr(attribute):
+ # type: (str) -> str
+ """
+ Return a single named information item from the distro release file
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `distro release file`_ for details about these information items.
+ """
+ return _distro.distro_release_attr(attribute)
+
+
+def uname_attr(attribute):
+ # type: (str) -> str
+ """
+ Return a single named information item from the distro release file
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+ """
+ return _distro.uname_attr(attribute)
+
+
+try:
+ from functools import cached_property
+except ImportError:
+ # Python < 3.8
+ class cached_property(object): # type: ignore
+ """A version of @property which caches the value. On access, it calls the
+ underlying function and sets the value in `__dict__` so future accesses
+ will not re-call the property.
+ """
+
+ def __init__(self, f):
+ # type: (Callable[[Any], Any]) -> None
+ self._fname = f.__name__
+ self._f = f
+
+ def __get__(self, obj, owner):
+ # type: (Any, Type[Any]) -> Any
+ assert obj is not None, "call {0} on an instance".format(self._fname)
+ ret = obj.__dict__[self._fname] = self._f(obj)
+ return ret
+
+
+class LinuxDistribution(object):
+ """
+ Provides information about an OS distribution.
+
+ This package creates a private module-global instance of this class with
+ default initialization arguments, that is used by the
+ `consolidated accessor functions`_ and `single source accessor functions`_.
+ By using default initialization arguments, that module-global instance
+ returns data about the current OS distribution (i.e. the distro this
+ package runs on).
+
+ Normally, it is not necessary to create additional instances of this class.
+ However, in situations where control is needed over the exact data sources
+ that are used, instances of this class can be created with a specific
+ distro release file, or a specific os-release file, or without invoking the
+ lsb_release command.
+ """
+
+ def __init__(
+ self,
+ include_lsb=True,
+ os_release_file="",
+ distro_release_file="",
+ include_uname=True,
+ root_dir=None,
+ ):
+ # type: (bool, str, str, bool, Optional[str]) -> None
+ """
+ The initialization method of this class gathers information from the
+ available data sources, and stores that in private instance attributes.
+ Subsequent access to the information items uses these private instance
+ attributes, so that the data sources are read only once.
+
+ Parameters:
+
+ * ``include_lsb`` (bool): Controls whether the
+ `lsb_release command output`_ is included as a data source.
+
+ If the lsb_release command is not available in the program execution
+ path, the data source for the lsb_release command will be empty.
+
+ * ``os_release_file`` (string): The path name of the
+ `os-release file`_ that is to be used as a data source.
+
+ An empty string (the default) will cause the default path name to
+ be used (see `os-release file`_ for details).
+
+ If the specified or defaulted os-release file does not exist, the
+ data source for the os-release file will be empty.
+
+ * ``distro_release_file`` (string): The path name of the
+ `distro release file`_ that is to be used as a data source.
+
+ An empty string (the default) will cause a default search algorithm
+ to be used (see `distro release file`_ for details).
+
+ If the specified distro release file does not exist, or if no default
+ distro release file can be found, the data source for the distro
+ release file will be empty.
+
+ * ``include_uname`` (bool): Controls whether uname command output is
+ included as a data source. If the uname command is not available in
+ the program execution path the data source for the uname command will
+ be empty.
+
+ * ``root_dir`` (string): The absolute path to the root directory to use
+ to find distro-related information files.
+
+ Public instance attributes:
+
+ * ``os_release_file`` (string): The path name of the
+ `os-release file`_ that is actually used as a data source. The
+ empty string if no distro release file is used as a data source.
+
+ * ``distro_release_file`` (string): The path name of the
+ `distro release file`_ that is actually used as a data source. The
+ empty string if no distro release file is used as a data source.
+
+ * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
+ This controls whether the lsb information will be loaded.
+
+ * ``include_uname`` (bool): The result of the ``include_uname``
+ parameter. This controls whether the uname information will
+ be loaded.
+
+ Raises:
+
+ * :py:exc:`IOError`: Some I/O issue with an os-release file or distro
+ release file.
+
+ * :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
+ some issue (other than not being available in the program execution
+ path).
+
+ * :py:exc:`UnicodeError`: A data source has unexpected characters or
+ uses an unexpected encoding.
+ """
+ self.root_dir = root_dir
+ self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR
+ self.usr_lib_dir = (
+ os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR
+ )
+
+ if os_release_file:
+ self.os_release_file = os_release_file
+ else:
+ etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME)
+ usr_lib_os_release_file = os.path.join(
+ self.usr_lib_dir, _OS_RELEASE_BASENAME
+ )
+
+ # NOTE: The idea is to respect order **and** have it set
+ # at all times for API backwards compatibility.
+ if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile(
+ usr_lib_os_release_file
+ ):
+ self.os_release_file = etc_dir_os_release_file
+ else:
+ self.os_release_file = usr_lib_os_release_file
+
+ self.distro_release_file = distro_release_file or "" # updated later
+ self.include_lsb = include_lsb
+ self.include_uname = include_uname
+
+ def __repr__(self):
+ # type: () -> str
+ """Return repr of all info"""
+ return (
+ "LinuxDistribution("
+ "os_release_file={self.os_release_file!r}, "
+ "distro_release_file={self.distro_release_file!r}, "
+ "include_lsb={self.include_lsb!r}, "
+ "include_uname={self.include_uname!r}, "
+ "_os_release_info={self._os_release_info!r}, "
+ "_lsb_release_info={self._lsb_release_info!r}, "
+ "_distro_release_info={self._distro_release_info!r}, "
+ "_uname_info={self._uname_info!r})".format(self=self)
+ )
+
+ def linux_distribution(self, full_distribution_name=True):
+ # type: (bool) -> Tuple[str, str, str]
+ """
+ Return information about the OS distribution that is compatible
+ with Python's :func:`platform.linux_distribution`, supporting a subset
+ of its parameters.
+
+ For details, see :func:`distro.linux_distribution`.
+ """
+ return (
+ self.name() if full_distribution_name else self.id(),
+ self.version(),
+ self.codename(),
+ )
+
+ def id(self):
+ # type: () -> str
+ """Return the distro ID of the OS distribution, as a string.
+
+ For details, see :func:`distro.id`.
+ """
+
+ def normalize(distro_id, table):
+ # type: (str, Dict[str, str]) -> str
+ distro_id = distro_id.lower().replace(" ", "_")
+ return table.get(distro_id, distro_id)
+
+ distro_id = self.os_release_attr("id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_OS_ID)
+
+ distro_id = self.lsb_release_attr("distributor_id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_LSB_ID)
+
+ distro_id = self.distro_release_attr("id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+ distro_id = self.uname_attr("id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+ return ""
+
+ def name(self, pretty=False):
+ # type: (bool) -> str
+ """
+ Return the name of the OS distribution, as a string.
+
+ For details, see :func:`distro.name`.
+ """
+ name = (
+ self.os_release_attr("name")
+ or self.lsb_release_attr("distributor_id")
+ or self.distro_release_attr("name")
+ or self.uname_attr("name")
+ )
+ if pretty:
+ name = self.os_release_attr("pretty_name") or self.lsb_release_attr(
+ "description"
+ )
+ if not name:
+ name = self.distro_release_attr("name") or self.uname_attr("name")
+ version = self.version(pretty=True)
+ if version:
+ name = name + " " + version
+ return name or ""
+
+ def version(self, pretty=False, best=False):
+ # type: (bool, bool) -> str
+ """
+ Return the version of the OS distribution, as a string.
+
+ For details, see :func:`distro.version`.
+ """
+ versions = [
+ self.os_release_attr("version_id"),
+ self.lsb_release_attr("release"),
+ self.distro_release_attr("version_id"),
+ self._parse_distro_release_content(self.os_release_attr("pretty_name")).get(
+ "version_id", ""
+ ),
+ self._parse_distro_release_content(
+ self.lsb_release_attr("description")
+ ).get("version_id", ""),
+ self.uname_attr("release"),
+ ]
+ version = ""
+ if best:
+ # This algorithm uses the last version in priority order that has
+ # the best precision. If the versions are not in conflict, that
+ # does not matter; otherwise, using the last one instead of the
+ # first one might be considered a surprise.
+ for v in versions:
+ if v.count(".") > version.count(".") or version == "":
+ version = v
+ else:
+ for v in versions:
+ if v != "":
+ version = v
+ break
+ if pretty and version and self.codename():
+ version = "{0} ({1})".format(version, self.codename())
+ return version
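To make the best selection rule concrete, a standalone illustration of the loop above (the candidate values are made up):

    versions = ["8", "", "8.2", "8.2.1503"]

    # best=False: take the first non-empty candidate in priority order
    first = next(v for v in versions if v != "")  # '8'

    # best=True: keep scanning and prefer the candidate with the most dots
    best = ""
    for v in versions:
        if v.count(".") > best.count(".") or best == "":
            best = v
    # best == '8.2.1503'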
+
+ def version_parts(self, best=False):
+ # type: (bool) -> Tuple[str, str, str]
+ """
+ Return the version of the OS distribution, as a tuple of version
+ numbers.
+
+ For details, see :func:`distro.version_parts`.
+ """
+ version_str = self.version(best=best)
+ if version_str:
+ version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?")
+ matches = version_regex.match(version_str)
+ if matches:
+ major, minor, build_number = matches.groups()
+ return major, minor or "", build_number or ""
+ return "", "", ""
+
+ def major_version(self, best=False):
+ # type: (bool) -> str
+ """
+ Return the major version number of the current distribution.
+
+ For details, see :func:`distro.major_version`.
+ """
+ return self.version_parts(best)[0]
+
+ def minor_version(self, best=False):
+ # type: (bool) -> str
+ """
+ Return the minor version number of the current distribution.
+
+ For details, see :func:`distro.minor_version`.
+ """
+ return self.version_parts(best)[1]
+
+ def build_number(self, best=False):
+ # type: (bool) -> str
+ """
+ Return the build number of the current distribution.
+
+ For details, see :func:`distro.build_number`.
+ """
+ return self.version_parts(best)[2]
+
+ def like(self):
+ # type: () -> str
+ """
+ Return the IDs of distributions that are like the OS distribution.
+
+ For details, see :func:`distro.like`.
+ """
+ return self.os_release_attr("id_like") or ""
+
+ def codename(self):
+ # type: () -> str
+ """
+ Return the codename of the OS distribution.
+
+ For details, see :func:`distro.codename`.
+ """
+ try:
+ # Handle os_release specially since distros might purposefully set
+ # this to empty string to have no codename
+ return self._os_release_info["codename"]
+ except KeyError:
+ return (
+ self.lsb_release_attr("codename")
+ or self.distro_release_attr("codename")
+ or ""
+ )
+
+ def info(self, pretty=False, best=False):
+ # type: (bool, bool) -> InfoDict
+ """
+ Return certain machine-readable information about the OS
+ distribution.
+
+ For details, see :func:`distro.info`.
+ """
+ return dict(
+ id=self.id(),
+ version=self.version(pretty, best),
+ version_parts=dict(
+ major=self.major_version(best),
+ minor=self.minor_version(best),
+ build_number=self.build_number(best),
+ ),
+ like=self.like(),
+ codename=self.codename(),
+ )
+
+ def os_release_info(self):
+ # type: () -> Dict[str, str]
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the os-release file data source of the OS distribution.
+
+ For details, see :func:`distro.os_release_info`.
+ """
+ return self._os_release_info
+
+ def lsb_release_info(self):
+ # type: () -> Dict[str, str]
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the lsb_release command data source of the OS
+ distribution.
+
+ For details, see :func:`distro.lsb_release_info`.
+ """
+ return self._lsb_release_info
+
+ def distro_release_info(self):
+ # type: () -> Dict[str, str]
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the distro release file data source of the OS
+ distribution.
+
+ For details, see :func:`distro.distro_release_info`.
+ """
+ return self._distro_release_info
+
+ def uname_info(self):
+ # type: () -> Dict[str, str]
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the uname command data source of the OS distribution.
+
+ For details, see :func:`distro.uname_info`.
+ """
+ return self._uname_info
+
+ def os_release_attr(self, attribute):
+ # type: (str) -> str
+ """
+ Return a single named information item from the os-release file data
+ source of the OS distribution.
+
+ For details, see :func:`distro.os_release_attr`.
+ """
+ return self._os_release_info.get(attribute, "")
+
+ def lsb_release_attr(self, attribute):
+ # type: (str) -> str
+ """
+ Return a single named information item from the lsb_release command
+ output data source of the OS distribution.
+
+ For details, see :func:`distro.lsb_release_attr`.
+ """
+ return self._lsb_release_info.get(attribute, "")
+
+ def distro_release_attr(self, attribute):
+ # type: (str) -> str
+ """
+ Return a single named information item from the distro release file
+ data source of the OS distribution.
+
+ For details, see :func:`distro.distro_release_attr`.
+ """
+ return self._distro_release_info.get(attribute, "")
+
+ def uname_attr(self, attribute):
+ # type: (str) -> str
+ """
+ Return a single named information item from the uname command
+ output data source of the OS distribution.
+
+ For details, see :func:`distro.uname_attr`.
+ """
+ return self._uname_info.get(attribute, "")
+
+ @cached_property
+ def _os_release_info(self):
+ # type: () -> Dict[str, str]
+ """
+ Get the information items from the specified os-release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if os.path.isfile(self.os_release_file):
+ with open(self.os_release_file) as release_file:
+ return self._parse_os_release_content(release_file)
+ return {}
+
+ @staticmethod
+ def _parse_os_release_content(lines):
+ # type: (TextIO) -> Dict[str, str]
+ """
+ Parse the lines of an os-release file.
+
+ Parameters:
+
+ * lines: Iterable through the lines in the os-release file.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ props = {}
+ lexer = shlex.shlex(lines, posix=True)
+ lexer.whitespace_split = True
+
+ # The shlex module defines its `wordchars` variable using literals,
+ # making it dependent on the encoding of the Python source file.
+ # In Python 2.6 and 2.7, the shlex source file is encoded in
+ # 'iso-8859-1', and the `wordchars` variable is defined as a byte
+ # string. This causes a UnicodeDecodeError to be raised when the
+ # parsed content is a unicode object. The following fix resolves that
+ # (... but it should be fixed in shlex...):
+ if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
+ lexer.wordchars = lexer.wordchars.decode("iso-8859-1")
+
+ tokens = list(lexer)
+ for token in tokens:
+ # At this point, all shell-like parsing has been done (i.e.
+ # comments processed, quotes and backslash escape sequences
+ # processed, multi-line values assembled, trailing newlines
+ # stripped, etc.), so the tokens are now either:
+ # * variable assignments: var=value
+ # * commands or their arguments (not allowed in os-release)
+ if "=" in token:
+ k, v = token.split("=", 1)
+ props[k.lower()] = v
+ else:
+ # Ignore any tokens that are not variable assignments
+ pass
+
+ if "version_codename" in props:
+            # os-release added a version_codename field. Use that in
+            # preference to anything else. Note that some distros purposefully
+            # do not have code names; they should be setting
+            # version_codename=""
+ props["codename"] = props["version_codename"]
+ elif "ubuntu_codename" in props:
+ # Same as above but a non-standard field name used on older Ubuntus
+ props["codename"] = props["ubuntu_codename"]
+ elif "version" in props:
+ # If there is no version_codename, parse it from the version
+ match = re.search(r"(\(\D+\))|,(\s+)?\D+", props["version"])
+            if match:
+                # The codename appears within parentheses or after a comma.
+                codename = match.group()
+                codename = codename.strip("()")
+                codename = codename.strip(",")
+                codename = codename.strip()
+                props["codename"] = codename
+
+ return props
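+
+    # A parsing sketch with hypothetical os-release content; with no
+    # version_codename field, the codename is recovered from the
+    # parenthesized part of VERSION:
+    #
+    #     import io
+    #     content = io.StringIO('ID=ubuntu\nVERSION="20.04 LTS (Focal Fossa)"\n')
+    #     LinuxDistribution._parse_os_release_content(content)
+    #     # -> {'id': 'ubuntu', 'version': '20.04 LTS (Focal Fossa)',
+    #     #     'codename': 'Focal Fossa'}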
+
+ @cached_property
+ def _lsb_release_info(self):
+ # type: () -> Dict[str, str]
+ """
+ Get the information items from the lsb_release command output.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if not self.include_lsb:
+ return {}
+ with open(os.devnull, "wb") as devnull:
+ try:
+ cmd = ("lsb_release", "-a")
+ stdout = _check_output(cmd, stderr=devnull)
+ # Command not found or lsb_release returned error
+ except (OSError, subprocess.CalledProcessError):
+ return {}
+ content = self._to_str(stdout).splitlines()
+ return self._parse_lsb_release_content(content)
+
+ @staticmethod
+ def _parse_lsb_release_content(lines):
+ # type: (Iterable[str]) -> Dict[str, str]
+ """
+ Parse the output of the lsb_release command.
+
+ Parameters:
+
+ * lines: Iterable through the lines of the lsb_release output.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ props = {}
+ for line in lines:
+ kv = line.strip("\n").split(":", 1)
+ if len(kv) != 2:
+ # Ignore lines without colon.
+ continue
+ k, v = kv
+ props.update({k.replace(" ", "_").lower(): v.strip()})
+ return props
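+
+    # A parsing sketch with hypothetical lsb_release output; keys are
+    # lowercased and spaces become underscores:
+    #
+    #     lines = ['Distributor ID:\tUbuntu', 'Release:\t20.04']
+    #     LinuxDistribution._parse_lsb_release_content(lines)
+    #     # -> {'distributor_id': 'Ubuntu', 'release': '20.04'}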
+
+ @cached_property
+ def _uname_info(self):
+ # type: () -> Dict[str, str]
+ with open(os.devnull, "wb") as devnull:
+ try:
+ cmd = ("uname", "-rs")
+ stdout = _check_output(cmd, stderr=devnull)
+ except OSError:
+ return {}
+ content = self._to_str(stdout).splitlines()
+ return self._parse_uname_content(content)
+
+ @staticmethod
+ def _parse_uname_content(lines):
+ # type: (Sequence[str]) -> Dict[str, str]
+ props = {}
+ match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip())
+ if match:
+ name, version = match.groups()
+
+ # This is to prevent the Linux kernel version from
+ # appearing as the 'best' version on otherwise
+ # identifiable distributions.
+ if name == "Linux":
+ return {}
+ props["id"] = name.lower()
+ props["name"] = name
+ props["release"] = version
+ return props
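+
+    # A parsing sketch (hypothetical inputs): a generic Linux kernel line is
+    # discarded on purpose, while other kernels yield id/name/release:
+    #
+    #     LinuxDistribution._parse_uname_content(['Linux 5.4.0'])
+    #     # -> {}
+    #     LinuxDistribution._parse_uname_content(['SunOS 5.11'])
+    #     # -> {'id': 'sunos', 'name': 'SunOS', 'release': '5.11'}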
+
+ @staticmethod
+ def _to_str(text):
+ # type: (Union[bytes, str]) -> str
+ encoding = sys.getfilesystemencoding()
+ encoding = "utf-8" if encoding == "ascii" else encoding
+
+ if sys.version_info[0] >= 3:
+ if isinstance(text, bytes):
+ return text.decode(encoding)
+ else:
+ if isinstance(text, unicode): # noqa pylint: disable=undefined-variable
+ return text.encode(encoding)
+
+ return text
+
+ @cached_property
+ def _distro_release_info(self):
+ # type: () -> Dict[str, str]
+ """
+ Get the information items from the specified distro release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if self.distro_release_file:
+ # If it was specified, we use it and parse what we can, even if
+ # its file name or content does not match the expected pattern.
+ distro_info = self._parse_distro_release_file(self.distro_release_file)
+ basename = os.path.basename(self.distro_release_file)
+ # The file name pattern for user-specified distro release files
+ # is somewhat more tolerant (compared to when searching for the
+ # file), because we want to use what was specified as best as
+ # possible.
+ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+ if "name" in distro_info and "cloudlinux" in distro_info["name"].lower():
+ distro_info["id"] = "cloudlinux"
+ elif match:
+ distro_info["id"] = match.group(1)
+ return distro_info
+ else:
+ try:
+ basenames = os.listdir(self.etc_dir)
+ # We sort for repeatability in cases where there are multiple
+ # distro specific files; e.g. CentOS, Oracle, Enterprise all
+ # containing `redhat-release` on top of their own.
+ basenames.sort()
+ except OSError:
+ # This may occur when /etc is not readable but we can't be
+ # sure about the *-release files. Check common entries of
+ # /etc for information. If they turn out to not be there the
+ # error is handled in `_parse_distro_release_file()`.
+ basenames = [
+ "SuSE-release",
+ "arch-release",
+ "base-release",
+ "centos-release",
+ "fedora-release",
+ "gentoo-release",
+ "mageia-release",
+ "mandrake-release",
+ "mandriva-release",
+ "mandrivalinux-release",
+ "manjaro-release",
+ "oracle-release",
+ "redhat-release",
+ "sl-release",
+ "slackware-version",
+ ]
+ for basename in basenames:
+ if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
+ continue
+ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+ if match:
+ filepath = os.path.join(self.etc_dir, basename)
+ distro_info = self._parse_distro_release_file(filepath)
+ if "name" in distro_info:
+ # The name is always present if the pattern matches
+ self.distro_release_file = filepath
+ distro_info["id"] = match.group(1)
+ if "cloudlinux" in distro_info["name"].lower():
+ distro_info["id"] = "cloudlinux"
+ return distro_info
+ return {}
+
+ def _parse_distro_release_file(self, filepath):
+ # type: (str) -> Dict[str, str]
+ """
+ Parse a distro release file.
+
+ Parameters:
+
+ * filepath: Path name of the distro release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ try:
+ with open(filepath) as fp:
+ # Only parse the first line. For instance, on SLES there
+ # are multiple lines. We don't want them...
+ return self._parse_distro_release_content(fp.readline())
+ except (OSError, IOError):
+ # Ignore not being able to read a specific, seemingly version
+ # related file.
+ # See https://github.com/python-distro/distro/issues/162
+ return {}
+
+ @staticmethod
+ def _parse_distro_release_content(line):
+ # type: (str) -> Dict[str, str]
+ """
+ Parse a line from a distro release file.
+
+ Parameters:
+ * line: Line from the distro release file. Must be a unicode string
+ or a UTF-8 encoded byte string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
+ distro_info = {}
+ if matches:
+ # regexp ensures non-None
+ distro_info["name"] = matches.group(3)[::-1]
+ if matches.group(2):
+ distro_info["version_id"] = matches.group(2)[::-1]
+ if matches.group(1):
+ distro_info["codename"] = matches.group(1)[::-1]
+ elif line:
+ distro_info["name"] = line.strip()
+ return distro_info
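+
+    # The line is matched against a reversed pattern so the trailing
+    # version/codename can be picked off first. A sketch with a hypothetical
+    # release line:
+    #
+    #     LinuxDistribution._parse_distro_release_content(
+    #         'CentOS Linux release 7.9.2009 (Core)')
+    #     # -> {'name': 'CentOS Linux', 'version_id': '7.9.2009',
+    #     #     'codename': 'Core'}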
+
+
+_distro = LinuxDistribution()
+
+
+def main():
+ # type: () -> None
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(logging.StreamHandler(sys.stdout))
+
+ dist = _distro
+
+ logger.info("Name: %s", dist.name(pretty=True))
+ distribution_version = dist.version(pretty=True)
+ logger.info("Version: %s", distribution_version)
+ distribution_codename = dist.codename()
+ logger.info("Codename: %s", distribution_codename)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/module_utils/errors.py b/lib/ansible/module_utils/errors.py
new file mode 100644
index 0000000..cbbd86c
--- /dev/null
+++ b/lib/ansible/module_utils/errors.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class AnsibleFallbackNotFound(Exception):
+ """Fallback validator was not found"""
+
+
+class AnsibleValidationError(Exception):
+ """Single argument spec validation error"""
+
+ def __init__(self, message):
+ super(AnsibleValidationError, self).__init__(message)
+ self.error_message = message
+ """The error message passed in when the exception was raised."""
+
+ @property
+ def msg(self):
+ """The error message passed in when the exception was raised."""
+ return self.args[0]
+
+
+class AnsibleValidationErrorMultiple(AnsibleValidationError):
+ """Multiple argument spec validation errors"""
+
+ def __init__(self, errors=None):
+ self.errors = errors[:] if errors else []
+ """:class:`list` of :class:`AnsibleValidationError` objects"""
+
+ def __getitem__(self, key):
+ return self.errors[key]
+
+ def __setitem__(self, key, value):
+ self.errors[key] = value
+
+ def __delitem__(self, key):
+ del self.errors[key]
+
+ @property
+ def msg(self):
+ """The first message from the first error in ``errors``."""
+ return self.errors[0].args[0]
+
+ @property
+ def messages(self):
+ """:class:`list` of each error message in ``errors``."""
+ return [err.msg for err in self.errors]
+
+ def append(self, error):
+ """Append a new error to ``self.errors``.
+
+ Only :class:`AnsibleValidationError` should be added.
+ """
+
+ self.errors.append(error)
+
+ def extend(self, errors):
+ """Append each item in ``errors`` to ``self.errors``. Only :class:`AnsibleValidationError` should be added."""
+ self.errors.extend(errors)
+
+
+class AliasError(AnsibleValidationError):
+ """Error handling aliases"""
+
+
+class ArgumentTypeError(AnsibleValidationError):
+ """Error with parameter type"""
+
+
+class ArgumentValueError(AnsibleValidationError):
+ """Error with parameter value"""
+
+
+class DeprecationError(AnsibleValidationError):
+ """Error processing parameter deprecations"""
+
+
+class ElementError(AnsibleValidationError):
+ """Error when validating elements"""
+
+
+class MutuallyExclusiveError(AnsibleValidationError):
+ """Mutually exclusive parameters were supplied"""
+
+
+class NoLogError(AnsibleValidationError):
+ """Error converting no_log values"""
+
+
+class RequiredByError(AnsibleValidationError):
+ """Error with parameters that are required by other parameters"""
+
+
+class RequiredDefaultError(AnsibleValidationError):
+ """A required parameter was assigned a default value"""
+
+
+class RequiredError(AnsibleValidationError):
+ """Missing a required parameter"""
+
+
+class RequiredIfError(AnsibleValidationError):
+ """Error with conditionally required parameters"""
+
+
+class RequiredOneOfError(AnsibleValidationError):
+ """Error with parameters where at least one is required"""
+
+
+class RequiredTogetherError(AnsibleValidationError):
+ """Error with parameters that are required together"""
+
+
+class SubParameterTypeError(AnsibleValidationError):
+ """Incorrect type for subparameter"""
+
+
+class UnsupportedError(AnsibleValidationError):
+ """Unsupported parameters were supplied"""
diff --git a/lib/ansible/module_utils/facts/__init__.py b/lib/ansible/module_utils/facts/__init__.py
new file mode 100644
index 0000000..96ab778
--- /dev/null
+++ b/lib/ansible/module_utils/facts/__init__.py
@@ -0,0 +1,34 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# import from the compat api because 2.0-2.3 had a module_utils.facts.ansible_facts
+# and get_all_facts in top level namespace
+from ansible.module_utils.facts.compat import ansible_facts, get_all_facts # noqa
diff --git a/lib/ansible/module_utils/facts/ansible_collector.py b/lib/ansible/module_utils/facts/ansible_collector.py
new file mode 100644
index 0000000..e9bafe2
--- /dev/null
+++ b/lib/ansible/module_utils/facts/ansible_collector.py
@@ -0,0 +1,158 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import fnmatch
+import sys
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts import timeout
+from ansible.module_utils.facts import collector
+from ansible.module_utils.common.collections import is_string
+
+
+class AnsibleFactCollector(collector.BaseFactCollector):
+    '''A FactCollector that returns results under the 'ansible_facts' top level key.
+
+    If a namespace is provided, facts will be collected under that namespace.
+    For example, an ansible.module_utils.facts.namespace.PrefixFactNamespace(prefix='ansible_').
+
+    Instances are typically assembled by the module-level get_ansible_collector()
+    helper, which populates collectors based on a gather_subset specifier.'''
+
+ def __init__(self, collectors=None, namespace=None, filter_spec=None):
+
+ super(AnsibleFactCollector, self).__init__(collectors=collectors,
+ namespace=namespace)
+
+ self.filter_spec = filter_spec
+
+ def _filter(self, facts_dict, filter_spec):
+ # assume filter_spec='' or filter_spec=[] is equivalent to filter_spec='*'
+ if not filter_spec or filter_spec == '*':
+ return facts_dict
+
+ if is_string(filter_spec):
+ filter_spec = [filter_spec]
+
+ found = []
+ for f in filter_spec:
+ for x, y in facts_dict.items():
+ if not f or fnmatch.fnmatch(x, f):
+ found.append((x, y))
+ elif not f.startswith(('ansible_', 'facter', 'ohai')):
+                    # try to match with the ansible_ prefix added when non-empty
+ g = 'ansible_%s' % f
+ if fnmatch.fnmatch(x, g):
+ found.append((x, y))
+ return found
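+
+    # A filtering sketch (hypothetical facts): matches come back as
+    # (key, value) pairs, which dict.update() accepts in collect() below.
+    # A bare 'eth*' is retried as 'ansible_eth*':
+    #
+    #     self._filter({'ansible_eth0': {...}, 'ansible_lo': {...}}, 'eth*')
+    #     # -> [('ansible_eth0', {...})]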
+
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+
+ facts_dict = {}
+
+ for collector_obj in self.collectors:
+ info_dict = {}
+
+ try:
+
+ # Note: this collects with namespaces, so collected_facts also includes namespaces
+ info_dict = collector_obj.collect_with_namespace(module=module,
+ collected_facts=collected_facts)
+ except Exception as e:
+ sys.stderr.write(repr(e))
+ sys.stderr.write('\n')
+
+ # shallow copy of the new facts to pass to each collector in collected_facts so facts
+ # can reference other facts they depend on.
+ collected_facts.update(info_dict.copy())
+
+ # NOTE: If we want complicated fact dict merging, this is where it would hook in
+ facts_dict.update(self._filter(info_dict, self.filter_spec))
+
+ return facts_dict
+
+
+class CollectorMetaDataCollector(collector.BaseFactCollector):
+    '''Collector that provides facts with the gather_subset metadata.'''
+
+ name = 'gather_subset'
+ _fact_ids = set() # type: t.Set[str]
+
+ def __init__(self, collectors=None, namespace=None, gather_subset=None, module_setup=None):
+ super(CollectorMetaDataCollector, self).__init__(collectors, namespace)
+ self.gather_subset = gather_subset
+ self.module_setup = module_setup
+
+ def collect(self, module=None, collected_facts=None):
+ meta_facts = {'gather_subset': self.gather_subset}
+ if self.module_setup:
+ meta_facts['module_setup'] = self.module_setup
+ return meta_facts
+
+
+def get_ansible_collector(all_collector_classes,
+ namespace=None,
+ filter_spec=None,
+ gather_subset=None,
+ gather_timeout=None,
+ minimal_gather_subset=None):
+
+ filter_spec = filter_spec or []
+ gather_subset = gather_subset or ['all']
+ gather_timeout = gather_timeout or timeout.DEFAULT_GATHER_TIMEOUT
+ minimal_gather_subset = minimal_gather_subset or frozenset()
+
+ collector_classes = \
+ collector.collector_classes_from_gather_subset(
+ all_collector_classes=all_collector_classes,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=gather_subset,
+ gather_timeout=gather_timeout)
+
+ collectors = []
+ for collector_class in collector_classes:
+ collector_obj = collector_class(namespace=namespace)
+ collectors.append(collector_obj)
+
+    # Add a collector that knows what gather_subset we used so it can provide a fact
+ collector_meta_data_collector = \
+ CollectorMetaDataCollector(gather_subset=gather_subset,
+ module_setup=True)
+ collectors.append(collector_meta_data_collector)
+
+ fact_collector = \
+ AnsibleFactCollector(collectors=collectors,
+ filter_spec=filter_spec,
+ namespace=namespace)
+
+ return fact_collector
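+
+
+# A wiring sketch (assuming the default collector list from
+# ansible.module_utils.facts.default_collectors and an AnsibleModule
+# instance named 'module'):
+#
+#     from ansible.module_utils.facts import default_collectors
+#     fact_collector = get_ansible_collector(
+#         all_collector_classes=default_collectors.collectors,
+#         gather_subset=['!all', 'network'],
+#         filter_spec='ansible_default_ipv4')
+#     facts = fact_collector.collect(module=module)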
diff --git a/lib/ansible/module_utils/facts/collector.py b/lib/ansible/module_utils/facts/collector.py
new file mode 100644
index 0000000..ac52fe8
--- /dev/null
+++ b/lib/ansible/module_utils/facts/collector.py
@@ -0,0 +1,402 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections import defaultdict
+
+import platform
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts import timeout
+
+
+class CycleFoundInFactDeps(Exception):
+ '''Indicates there is a cycle in fact collector deps
+
+ If collector-B requires collector-A, and collector-A requires
+ collector-B, that is a cycle. In that case, there is no ordering
+    that will satisfy B before A and A before B. That will cause this
+ error to be raised.
+ '''
+ pass
+
+
+class UnresolvedFactDep(ValueError):
+ pass
+
+
+class CollectorNotFoundError(KeyError):
+ pass
+
+
+class BaseFactCollector:
+ _fact_ids = set() # type: t.Set[str]
+
+ _platform = 'Generic'
+ name = None # type: str | None
+ required_facts = set() # type: t.Set[str]
+
+ def __init__(self, collectors=None, namespace=None):
+ '''Base class for things that collect facts.
+
+ 'collectors' is an optional list of other FactCollectors for composing.'''
+ self.collectors = collectors or []
+
+        # self.namespace is an object with a 'transform' method that transforms
+        # the name to indicate the namespace (i.e., adds a prefix or suffix).
+ self.namespace = namespace
+
+ self.fact_ids = set([self.name])
+ self.fact_ids.update(self._fact_ids)
+
+ @classmethod
+ def platform_match(cls, platform_info):
+ if platform_info.get('system', None) == cls._platform:
+ return cls
+ return None
+
+ def _transform_name(self, key_name):
+ if self.namespace:
+ return self.namespace.transform(key_name)
+ return key_name
+
+ def _transform_dict_keys(self, fact_dict):
+        '''Update a dict's keys to use new names as transformed by self._transform_name'''
+
+ for old_key in list(fact_dict.keys()):
+ new_key = self._transform_name(old_key)
+ # pop the item by old_key and replace it using new_key
+ fact_dict[new_key] = fact_dict.pop(old_key)
+ return fact_dict
+
+ # TODO/MAYBE: rename to 'collect' and add 'collect_without_namespace'
+ def collect_with_namespace(self, module=None, collected_facts=None):
+ # collect, then transform the key names if needed
+ facts_dict = self.collect(module=module, collected_facts=collected_facts)
+ if self.namespace:
+ facts_dict = self._transform_dict_keys(facts_dict)
+ return facts_dict
+
+ def collect(self, module=None, collected_facts=None):
+ '''do the fact collection
+
+        'collected_facts' is an object (a dict, likely) that holds all previously
+        collected facts. This is intended to be used if a FactCollector needs to
+        reference another fact (for example, the system arch) and should not be
+        modified (usually).
+
+ Returns a dict of facts.
+
+ '''
+ facts_dict = {}
+ return facts_dict
+
+
+def get_collector_names(valid_subsets=None,
+ minimal_gather_subset=None,
+ gather_subset=None,
+ aliases_map=None,
+ platform_info=None):
+ '''return a set of FactCollector names based on gather_subset spec.
+
+ gather_subset is a spec describing which facts to gather.
+    valid_subsets is a frozenset of potential matches for gather_subset ('all', 'network', etc.).
+    minimal_gather_subset is a frozenset of matches to always use, even for gather_subset='!all'.
+ '''
+
+ # Retrieve module parameters
+ gather_subset = gather_subset or ['all']
+
+ # the list of everything that 'all' expands to
+ valid_subsets = valid_subsets or frozenset()
+
+ # if provided, minimal_gather_subset is always added, even after all negations
+ minimal_gather_subset = minimal_gather_subset or frozenset()
+
+ aliases_map = aliases_map or defaultdict(set)
+
+ # Retrieve all facts elements
+ additional_subsets = set()
+ exclude_subsets = set()
+
+    # total always starts with the min set, then
+    # adds the additions in gather_subset, then
+    # excludes all of the excludes, then adds any explicitly
+    # requested subsets.
+ gather_subset_with_min = ['min']
+ gather_subset_with_min.extend(gather_subset)
+
+ # subsets we mention in gather_subset explicitly, except for 'all'/'min'
+ explicitly_added = set()
+
+ for subset in gather_subset_with_min:
+ subset_id = subset
+ if subset_id == 'min':
+ additional_subsets.update(minimal_gather_subset)
+ continue
+ if subset_id == 'all':
+ additional_subsets.update(valid_subsets)
+ continue
+ if subset_id.startswith('!'):
+ subset = subset[1:]
+ if subset == 'min':
+ exclude_subsets.update(minimal_gather_subset)
+ continue
+ if subset == 'all':
+ exclude_subsets.update(valid_subsets - minimal_gather_subset)
+ continue
+ exclude = True
+ else:
+ exclude = False
+
+ if exclude:
+ # include 'devices', 'dmi' etc for '!hardware'
+ exclude_subsets.update(aliases_map.get(subset, set()))
+ exclude_subsets.add(subset)
+ else:
+            # NOTE: this only considers adding an unknown gather subset an error. Asking to
+ # exclude an unknown gather subset is ignored.
+ if subset_id not in valid_subsets:
+ raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: all, %s" %
+ (subset, ", ".join(sorted(valid_subsets))))
+
+ explicitly_added.add(subset)
+ additional_subsets.add(subset)
+
+ if not additional_subsets:
+ additional_subsets.update(valid_subsets)
+
+ additional_subsets.difference_update(exclude_subsets - explicitly_added)
+
+ return additional_subsets
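+
+
+# A selection sketch (hypothetical subsets): negations are applied after the
+# 'min'/'all' expansions, but explicitly requested subsets survive them.
+#
+#     get_collector_names(valid_subsets=frozenset(['network', 'hardware', 'platform']),
+#                         minimal_gather_subset=frozenset(['platform']),
+#                         gather_subset=['!all', 'network'])
+#     # -> {'platform', 'network'}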
+
+
+def find_collectors_for_platform(all_collector_classes, compat_platforms):
+ found_collectors = set()
+ found_collectors_names = set()
+
+ # start from specific platform, then try generic
+ for compat_platform in compat_platforms:
+ platform_match = None
+ for all_collector_class in all_collector_classes:
+
+ # ask the class if it is compatible with the platform info
+ platform_match = all_collector_class.platform_match(compat_platform)
+
+ if not platform_match:
+ continue
+
+ primary_name = all_collector_class.name
+
+ if primary_name not in found_collectors_names:
+ found_collectors.add(all_collector_class)
+ found_collectors_names.add(all_collector_class.name)
+
+ return found_collectors
+
+
+def build_fact_id_to_collector_map(collectors_for_platform):
+ fact_id_to_collector_map = defaultdict(list)
+ aliases_map = defaultdict(set)
+
+ for collector_class in collectors_for_platform:
+ primary_name = collector_class.name
+
+ fact_id_to_collector_map[primary_name].append(collector_class)
+
+ for fact_id in collector_class._fact_ids:
+ fact_id_to_collector_map[fact_id].append(collector_class)
+ aliases_map[primary_name].add(fact_id)
+
+ return fact_id_to_collector_map, aliases_map
+
+
+def select_collector_classes(collector_names, all_fact_subsets):
+ seen_collector_classes = set()
+
+ selected_collector_classes = []
+
+ for collector_name in collector_names:
+ collector_classes = all_fact_subsets.get(collector_name, [])
+ for collector_class in collector_classes:
+ if collector_class not in seen_collector_classes:
+ selected_collector_classes.append(collector_class)
+ seen_collector_classes.add(collector_class)
+
+ return selected_collector_classes
+
+
+def _get_requires_by_collector_name(collector_name, all_fact_subsets):
+ required_facts = set()
+
+ try:
+ collector_classes = all_fact_subsets[collector_name]
+ except KeyError:
+ raise CollectorNotFoundError('Fact collector "%s" not found' % collector_name)
+ for collector_class in collector_classes:
+ required_facts.update(collector_class.required_facts)
+ return required_facts
+
+
+def find_unresolved_requires(collector_names, all_fact_subsets):
+ '''Find any collector names that have unresolved requires
+
+    Returns the set of required fact names that are not provided by
+    any name in collector_names.
+ '''
+ unresolved = set()
+
+ for collector_name in collector_names:
+ required_facts = _get_requires_by_collector_name(collector_name, all_fact_subsets)
+ for required_fact in required_facts:
+ if required_fact not in collector_names:
+ unresolved.add(required_fact)
+
+ return unresolved
+
+
+def resolve_requires(unresolved_requires, all_fact_subsets):
+ new_names = set()
+ failed = []
+ for unresolved in unresolved_requires:
+ if unresolved in all_fact_subsets:
+ new_names.add(unresolved)
+ else:
+ failed.append(unresolved)
+
+ if failed:
+ raise UnresolvedFactDep('unresolved fact dep %s' % ','.join(failed))
+ return new_names
+
+
+def build_dep_data(collector_names, all_fact_subsets):
+ dep_map = defaultdict(set)
+ for collector_name in collector_names:
+ collector_deps = set()
+ for collector in all_fact_subsets[collector_name]:
+ for dep in collector.required_facts:
+ collector_deps.add(dep)
+ dep_map[collector_name] = collector_deps
+ return dep_map
+
+
+def tsort(dep_map):
+ sorted_list = []
+
+ unsorted_map = dep_map.copy()
+
+ while unsorted_map:
+ acyclic = False
+ for node, edges in list(unsorted_map.items()):
+ for edge in edges:
+ if edge in unsorted_map:
+ break
+ else:
+ acyclic = True
+ del unsorted_map[node]
+ sorted_list.append((node, edges))
+
+ if not acyclic:
+ raise CycleFoundInFactDeps('Unable to tsort deps, there was a cycle in the graph. sorted=%s' % sorted_list)
+
+ return sorted_list
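+
+
+# A topological-sort sketch (hypothetical deps): nodes whose edges are all
+# satisfied are emitted first, so dependencies precede their dependents.
+#
+#     tsort({'network': set(['platform']), 'platform': set()})
+#     # -> [('platform', set()), ('network', {'platform'})]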
+
+
+def _solve_deps(collector_names, all_fact_subsets):
+ unresolved = collector_names.copy()
+ solutions = collector_names.copy()
+
+ while True:
+ unresolved = find_unresolved_requires(solutions, all_fact_subsets)
+ if unresolved == set():
+ break
+
+ new_names = resolve_requires(unresolved, all_fact_subsets)
+ solutions.update(new_names)
+
+ return solutions
+
+
+def collector_classes_from_gather_subset(all_collector_classes=None,
+ valid_subsets=None,
+ minimal_gather_subset=None,
+ gather_subset=None,
+ gather_timeout=None,
+ platform_info=None):
+ '''return a list of collector classes that match the args'''
+
+    # use gather_subset etc. to get the list of collectors
+
+ all_collector_classes = all_collector_classes or []
+
+ minimal_gather_subset = minimal_gather_subset or frozenset()
+
+ platform_info = platform_info or {'system': platform.system()}
+
+ gather_timeout = gather_timeout or timeout.DEFAULT_GATHER_TIMEOUT
+
+    # tweak the module's GATHER_TIMEOUT
+ timeout.GATHER_TIMEOUT = gather_timeout
+
+ valid_subsets = valid_subsets or frozenset()
+
+ # maps alias names like 'hardware' to the list of names that are part of hardware
+ # like 'devices' and 'dmi'
+ aliases_map = defaultdict(set)
+
+ compat_platforms = [platform_info, {'system': 'Generic'}]
+
+ collectors_for_platform = find_collectors_for_platform(all_collector_classes, compat_platforms)
+
+    # all_fact_subsets maps the subset name ('hardware') to the list of classes that provide it.
+
+ # TODO: name collisions here? are there facts with the same name as a gather_subset (all, network, hardware, virtual, ohai, facter)
+ all_fact_subsets, aliases_map = build_fact_id_to_collector_map(collectors_for_platform)
+
+ all_valid_subsets = frozenset(all_fact_subsets.keys())
+
+    # expand any fact_id/collectorname/gather_subset term ('all', 'env', etc.) to the list of names it represents
+ collector_names = get_collector_names(valid_subsets=all_valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=gather_subset,
+ aliases_map=aliases_map,
+ platform_info=platform_info)
+
+ complete_collector_names = _solve_deps(collector_names, all_fact_subsets)
+
+ dep_map = build_dep_data(complete_collector_names, all_fact_subsets)
+
+ ordered_deps = tsort(dep_map)
+ ordered_collector_names = [x[0] for x in ordered_deps]
+
+ selected_collector_classes = select_collector_classes(ordered_collector_names,
+ all_fact_subsets)
+
+ return selected_collector_classes
diff --git a/lib/ansible/module_utils/facts/compat.py b/lib/ansible/module_utils/facts/compat.py
new file mode 100644
index 0000000..a69fee3
--- /dev/null
+++ b/lib/ansible/module_utils/facts/compat.py
@@ -0,0 +1,87 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.namespace import PrefixFactNamespace
+from ansible.module_utils.facts import default_collectors
+from ansible.module_utils.facts import ansible_collector
+
+
+def get_all_facts(module):
+ '''compat api for ansible 2.2/2.3 module_utils.facts.get_all_facts method
+
+ Expects module to be an instance of AnsibleModule, with a 'gather_subset' param.
+
+ returns a dict mapping the bare fact name ('default_ipv4' with no 'ansible_' namespace) to
+ the fact value.'''
+
+ gather_subset = module.params['gather_subset']
+ return ansible_facts(module, gather_subset=gather_subset)
+
+
+def ansible_facts(module, gather_subset=None):
+ '''Compat api for ansible 2.0/2.2/2.3 module_utils.facts.ansible_facts method
+
+    2.2/2.3 expects a gather_subset arg.
+    2.0/2.1 does not accept a gather_subset arg.
+
+    So make gather_subset an optional arg, defaulting to the module's
+    'gather_subset' param (or ['all']).
+
+ 'module' should be an instance of an AnsibleModule.
+
+ returns a dict mapping the bare fact name ('default_ipv4' with no 'ansible_' namespace) to
+ the fact value.
+ '''
+
+ gather_subset = gather_subset or module.params.get('gather_subset', ['all'])
+ gather_timeout = module.params.get('gather_timeout', 10)
+ filter_spec = module.params.get('filter', '*')
+
+ minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
+ 'distribution', 'dns', 'env', 'fips', 'local',
+ 'lsb', 'pkg_mgr', 'platform', 'python', 'selinux',
+ 'service_mgr', 'ssh_pub_keys', 'user'])
+
+ all_collector_classes = default_collectors.collectors
+
+ # don't add a prefix
+ namespace = PrefixFactNamespace(namespace_name='ansible', prefix='')
+
+ fact_collector = \
+ ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
+ namespace=namespace,
+ filter_spec=filter_spec,
+ gather_subset=gather_subset,
+ gather_timeout=gather_timeout,
+ minimal_gather_subset=minimal_gather_subset)
+
+ facts_dict = fact_collector.collect(module=module)
+
+ return facts_dict
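+
+
+# A compat-api sketch ('module' is assumed to be an AnsibleModule whose params
+# include 'gather_subset', 'gather_timeout' and 'filter'):
+#
+#     facts = ansible_facts(module, gather_subset=['!all', 'network'])
+#     facts.get('default_ipv4')  # bare fact names, no 'ansible_' prefix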
diff --git a/lib/ansible/module_utils/facts/default_collectors.py b/lib/ansible/module_utils/facts/default_collectors.py
new file mode 100644
index 0000000..cf0ef23
--- /dev/null
+++ b/lib/ansible/module_utils/facts/default_collectors.py
@@ -0,0 +1,177 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+from ansible.module_utils.facts.other.facter import FacterFactCollector
+from ansible.module_utils.facts.other.ohai import OhaiFactCollector
+
+from ansible.module_utils.facts.system.apparmor import ApparmorFactCollector
+from ansible.module_utils.facts.system.caps import SystemCapabilitiesFactCollector
+from ansible.module_utils.facts.system.chroot import ChrootFactCollector
+from ansible.module_utils.facts.system.cmdline import CmdLineFactCollector
+from ansible.module_utils.facts.system.distribution import DistributionFactCollector
+from ansible.module_utils.facts.system.date_time import DateTimeFactCollector
+from ansible.module_utils.facts.system.env import EnvFactCollector
+from ansible.module_utils.facts.system.dns import DnsFactCollector
+from ansible.module_utils.facts.system.fips import FipsFactCollector
+from ansible.module_utils.facts.system.loadavg import LoadAvgFactCollector
+from ansible.module_utils.facts.system.local import LocalFactCollector
+from ansible.module_utils.facts.system.lsb import LSBFactCollector
+from ansible.module_utils.facts.system.pkg_mgr import PkgMgrFactCollector
+from ansible.module_utils.facts.system.pkg_mgr import OpenBSDPkgMgrFactCollector
+from ansible.module_utils.facts.system.platform import PlatformFactCollector
+from ansible.module_utils.facts.system.python import PythonFactCollector
+from ansible.module_utils.facts.system.selinux import SelinuxFactCollector
+from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
+from ansible.module_utils.facts.system.ssh_pub_keys import SshPubKeyFactCollector
+from ansible.module_utils.facts.system.user import UserFactCollector
+
+from ansible.module_utils.facts.hardware.base import HardwareCollector
+from ansible.module_utils.facts.hardware.aix import AIXHardwareCollector
+from ansible.module_utils.facts.hardware.darwin import DarwinHardwareCollector
+from ansible.module_utils.facts.hardware.dragonfly import DragonFlyHardwareCollector
+from ansible.module_utils.facts.hardware.freebsd import FreeBSDHardwareCollector
+from ansible.module_utils.facts.hardware.hpux import HPUXHardwareCollector
+from ansible.module_utils.facts.hardware.hurd import HurdHardwareCollector
+from ansible.module_utils.facts.hardware.linux import LinuxHardwareCollector
+from ansible.module_utils.facts.hardware.netbsd import NetBSDHardwareCollector
+from ansible.module_utils.facts.hardware.openbsd import OpenBSDHardwareCollector
+from ansible.module_utils.facts.hardware.sunos import SunOSHardwareCollector
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.aix import AIXNetworkCollector
+from ansible.module_utils.facts.network.darwin import DarwinNetworkCollector
+from ansible.module_utils.facts.network.dragonfly import DragonFlyNetworkCollector
+from ansible.module_utils.facts.network.fc_wwn import FcWwnInitiatorFactCollector
+from ansible.module_utils.facts.network.freebsd import FreeBSDNetworkCollector
+from ansible.module_utils.facts.network.hpux import HPUXNetworkCollector
+from ansible.module_utils.facts.network.hurd import HurdNetworkCollector
+from ansible.module_utils.facts.network.linux import LinuxNetworkCollector
+from ansible.module_utils.facts.network.iscsi import IscsiInitiatorNetworkCollector
+from ansible.module_utils.facts.network.nvme import NvmeInitiatorNetworkCollector
+from ansible.module_utils.facts.network.netbsd import NetBSDNetworkCollector
+from ansible.module_utils.facts.network.openbsd import OpenBSDNetworkCollector
+from ansible.module_utils.facts.network.sunos import SunOSNetworkCollector
+
+from ansible.module_utils.facts.virtual.base import VirtualCollector
+from ansible.module_utils.facts.virtual.dragonfly import DragonFlyVirtualCollector
+from ansible.module_utils.facts.virtual.freebsd import FreeBSDVirtualCollector
+from ansible.module_utils.facts.virtual.hpux import HPUXVirtualCollector
+from ansible.module_utils.facts.virtual.linux import LinuxVirtualCollector
+from ansible.module_utils.facts.virtual.netbsd import NetBSDVirtualCollector
+from ansible.module_utils.facts.virtual.openbsd import OpenBSDVirtualCollector
+from ansible.module_utils.facts.virtual.sunos import SunOSVirtualCollector
+
+# these should always be first due to most other facts depending on them
+_base = [
+ PlatformFactCollector,
+ DistributionFactCollector,
+ LSBFactCollector
+] # type: t.List[t.Type[BaseFactCollector]]
+
+# These restrict what is possible in others
+_restrictive = [
+ SelinuxFactCollector,
+ ApparmorFactCollector,
+ ChrootFactCollector,
+ FipsFactCollector
+] # type: t.List[t.Type[BaseFactCollector]]
+
+# general info, not required but probably useful for other facts
+_general = [
+ PythonFactCollector,
+ SystemCapabilitiesFactCollector,
+ PkgMgrFactCollector,
+ OpenBSDPkgMgrFactCollector,
+ ServiceMgrFactCollector,
+ CmdLineFactCollector,
+ DateTimeFactCollector,
+ EnvFactCollector,
+ LoadAvgFactCollector,
+ SshPubKeyFactCollector,
+ UserFactCollector
+] # type: t.List[t.Type[BaseFactCollector]]
+
+# virtual, this might also limit hardware/networking
+_virtual = [
+ VirtualCollector,
+ DragonFlyVirtualCollector,
+ FreeBSDVirtualCollector,
+ LinuxVirtualCollector,
+ OpenBSDVirtualCollector,
+ NetBSDVirtualCollector,
+ SunOSVirtualCollector,
+ HPUXVirtualCollector
+] # type: t.List[t.Type[BaseFactCollector]]
+
+_hardware = [
+ HardwareCollector,
+ AIXHardwareCollector,
+ DarwinHardwareCollector,
+ DragonFlyHardwareCollector,
+ FreeBSDHardwareCollector,
+ HPUXHardwareCollector,
+ HurdHardwareCollector,
+ LinuxHardwareCollector,
+ NetBSDHardwareCollector,
+ OpenBSDHardwareCollector,
+ SunOSHardwareCollector
+] # type: t.List[t.Type[BaseFactCollector]]
+
+_network = [
+ DnsFactCollector,
+ FcWwnInitiatorFactCollector,
+ NetworkCollector,
+ AIXNetworkCollector,
+ DarwinNetworkCollector,
+ DragonFlyNetworkCollector,
+ FreeBSDNetworkCollector,
+ HPUXNetworkCollector,
+ HurdNetworkCollector,
+ IscsiInitiatorNetworkCollector,
+ NvmeInitiatorNetworkCollector,
+ LinuxNetworkCollector,
+ NetBSDNetworkCollector,
+ OpenBSDNetworkCollector,
+ SunOSNetworkCollector
+] # type: t.List[t.Type[BaseFactCollector]]
+
+# other fact sources
+_extra_facts = [
+ LocalFactCollector,
+ FacterFactCollector,
+ OhaiFactCollector
+] # type: t.List[t.Type[BaseFactCollector]]
+
+# TODO: make config driven
+collectors = _base + _restrictive + _general + _virtual + _hardware + _network + _extra_facts
diff --git a/lib/ansible/module_utils/facts/hardware/__init__.py b/lib/ansible/module_utils/facts/hardware/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/__init__.py
diff --git a/lib/ansible/module_utils/facts/hardware/aix.py b/lib/ansible/module_utils/facts/hardware/aix.py
new file mode 100644
index 0000000..dc37394
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/aix.py
@@ -0,0 +1,266 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts.utils import get_mount_size
+
+
+class AIXHardware(Hardware):
+ """
+ AIX-specific subclass of Hardware. Defines memory and CPU facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor (a list)
+ - processor_count
+ - processor_cores
+ - processor_threads_per_core
+ - processor_vcpus
+ """
+ platform = 'AIX'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+
+ cpu_facts = self.get_cpu_facts()
+ memory_facts = self.get_memory_facts()
+ dmi_facts = self.get_dmi_facts()
+ vgs_facts = self.get_vgs_facts()
+ mount_facts = self.get_mount_facts()
+ devices_facts = self.get_device_facts()
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(dmi_facts)
+ hardware_facts.update(vgs_facts)
+ hardware_facts.update(mount_facts)
+ hardware_facts.update(devices_facts)
+
+ return hardware_facts
+
+ def get_cpu_facts(self):
+ cpu_facts = {}
+ cpu_facts['processor'] = []
+
+ # FIXME: not clear how to detect multi-sockets
+ cpu_facts['processor_count'] = 1
+ rc, out, err = self.module.run_command(
+ "/usr/sbin/lsdev -Cc processor"
+ )
+ if out:
+ i = 0
+ for line in out.splitlines():
+
+ if 'Available' in line:
+ if i == 0:
+ data = line.split(' ')
+ cpudev = data[0]
+
+ i += 1
+ cpu_facts['processor_cores'] = int(i)
+
+ rc, out, err = self.module.run_command(
+ "/usr/sbin/lsattr -El " + cpudev + " -a type"
+ )
+
+ data = out.split(' ')
+ cpu_facts['processor'] = [data[1]]
+
+ cpu_facts['processor_threads_per_core'] = 1
+ rc, out, err = self.module.run_command(
+ "/usr/sbin/lsattr -El " + cpudev + " -a smt_threads"
+ )
+ if out:
+ data = out.split(' ')
+ cpu_facts['processor_threads_per_core'] = int(data[1])
+ cpu_facts['processor_vcpus'] = (
+ cpu_facts['processor_cores'] * cpu_facts['processor_threads_per_core']
+ )
+
+ return cpu_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+ pagesize = 4096
+ rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
+ for line in out.splitlines():
+ data = line.split()
+ if 'memory pages' in line:
+ pagecount = int(data[0])
+ if 'free pages' in line:
+ freecount = int(data[0])
+ memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
+ memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
+        # Get swap info from 'lsps -s', whose output looks like:
+        # Total Paging Space   Percent Used
+        #       512MB               1%
+        #
+ rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
+ if out:
+ lines = out.splitlines()
+ data = lines[1].split()
+ swaptotal_mb = int(data[0].rstrip('MB'))
+ percused = int(data[1].rstrip('%'))
+ memory_facts['swaptotal_mb'] = swaptotal_mb
+ memory_facts['swapfree_mb'] = int(swaptotal_mb * (100 - percused) / 100)
+
+ return memory_facts
+
+ def get_dmi_facts(self):
+ dmi_facts = {}
+
+ rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
+ data = out.split()
+ dmi_facts['firmware_version'] = data[1].strip('IBM,')
+ lsconf_path = self.module.get_bin_path("lsconf")
+ if lsconf_path:
+ rc, out, err = self.module.run_command(lsconf_path)
+ if rc == 0 and out:
+ for line in out.splitlines():
+ data = line.split(':')
+ if 'Machine Serial Number' in line:
+ dmi_facts['product_serial'] = data[1].strip()
+ if 'LPAR Info' in line:
+ dmi_facts['lpar_info'] = data[1].strip()
+ if 'System Model' in line:
+ dmi_facts['product_name'] = data[1].strip()
+ return dmi_facts
+
+ def get_vgs_facts(self):
+ """
+ Get vg and pv Facts
+ rootvg:
+ PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
+ hdisk0 active 546 0 00..00..00..00..00
+ hdisk1 active 546 113 00..00..00..21..92
+ realsyncvg:
+ PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
+ hdisk74 active 1999 6 00..00..00..00..06
+ testvg:
+ PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
+ hdisk105 active 999 838 200..39..199..200..200
+ hdisk106 active 999 599 200..00..00..199..200
+ """
+
+ vgs_facts = {}
+ lsvg_path = self.module.get_bin_path("lsvg")
+ xargs_path = self.module.get_bin_path("xargs")
+ cmd = "%s -o | %s %s -p" % (lsvg_path, xargs_path, lsvg_path)
+ if lsvg_path and xargs_path:
+ rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
+ if rc == 0 and out:
+ vgs_facts['vgs'] = {}
+ for m in re.finditer(r'(\S+):\n.*FREE DISTRIBUTION(\n(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*)+', out):
+ vgs_facts['vgs'][m.group(1)] = []
+ pp_size = 0
+ cmd = "%s %s" % (lsvg_path, m.group(1))
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0 and out:
+ pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)', out).group(1)
+ for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*', m.group(0)):
+ pv_info = {'pv_name': n.group(1),
+ 'pv_state': n.group(2),
+ 'total_pps': n.group(3),
+ 'free_pps': n.group(4),
+ 'pp_size': pp_size
+ }
+ vgs_facts['vgs'][m.group(1)].append(pv_info)
+
+ return vgs_facts
+
+ def get_mount_facts(self):
+ mount_facts = {}
+
+ mount_facts['mounts'] = []
+
+ mounts = []
+
+        # AIX does not have an mtab, so the mount command is the only source of
+        # this info (short of using API calls to get the same data)
+ mount_path = self.module.get_bin_path('mount')
+ rc, mount_out, err = self.module.run_command(mount_path)
+ if mount_out:
+ for line in mount_out.split('\n'):
+ fields = line.split()
+ if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
+ if re.match('^/', fields[0]):
+ # normal mount
+ mount = fields[1]
+ mount_info = {'mount': mount,
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[6],
+ 'time': '%s %s %s' % (fields[3], fields[4], fields[5])}
+ mount_info.update(get_mount_size(mount))
+ else:
+ # nfs or cifs based mount
+                    # in case of nfs, if no mount options are provided on the
+                    # command line, append an empty string to fields...
+ if len(fields) < 8:
+ fields.append("")
+
+ mount_info = {'mount': fields[2],
+ 'device': '%s:%s' % (fields[0], fields[1]),
+ 'fstype': fields[3],
+ 'options': fields[7],
+ 'time': '%s %s %s' % (fields[4], fields[5], fields[6])}
+
+ mounts.append(mount_info)
+
+ mount_facts['mounts'] = mounts
+
+ return mount_facts
+
+ def get_device_facts(self):
+ device_facts = {}
+ device_facts['devices'] = {}
+
+ lsdev_cmd = self.module.get_bin_path('lsdev', True)
+ lsattr_cmd = self.module.get_bin_path('lsattr', True)
+ rc, out_lsdev, err = self.module.run_command(lsdev_cmd)
+
+ for line in out_lsdev.splitlines():
+ field = line.split()
+
+ device_attrs = {}
+ device_name = field[0]
+ device_state = field[1]
+ device_type = field[2:]
+ lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name]
+ rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args)
+ for attr in out_lsattr.splitlines():
+ attr_fields = attr.split()
+ attr_name = attr_fields[0]
+ attr_parameter = attr_fields[1]
+ device_attrs[attr_name] = attr_parameter
+
+ device_facts['devices'][device_name] = {
+ 'state': device_state,
+ 'type': ' '.join(device_type),
+ 'attributes': device_attrs
+ }
+
+ return device_facts
+
+
+class AIXHardwareCollector(HardwareCollector):
+ _platform = 'AIX'
+ _fact_class = AIXHardware
diff --git a/lib/ansible/module_utils/facts/hardware/base.py b/lib/ansible/module_utils/facts/hardware/base.py
new file mode 100644
index 0000000..846bb30
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/base.py
@@ -0,0 +1,68 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class Hardware:
+ platform = 'Generic'
+
+ # FIXME: remove load_on_init when we can
+ def __init__(self, module, load_on_init=False):
+ self.module = module
+
+ def populate(self, collected_facts=None):
+ return {}
+
+
+class HardwareCollector(BaseFactCollector):
+ name = 'hardware'
+ _fact_ids = set(['processor',
+ 'processor_cores',
+ 'processor_count',
+                     # TODO: mounts isn't exactly hardware
+ 'mounts',
+ 'devices']) # type: t.Set[str]
+ _fact_class = Hardware
+
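+    # Typical flow (illustrative): the fact-gathering machinery instantiates the
+    # collector whose _platform matches the host and calls collect(module=module),
+    # which returns a plain dict such as {'processor': [...], 'mounts': [...]}.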
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+ if not module:
+ return {}
+
+        # instantiate the platform-specific Hardware subclass registered on this collector
+ facts_obj = self._fact_class(module)
+
+ facts_dict = facts_obj.populate(collected_facts=collected_facts)
+
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/hardware/darwin.py b/lib/ansible/module_utils/facts/hardware/darwin.py
new file mode 100644
index 0000000..d6a8e11
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/darwin.py
@@ -0,0 +1,159 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import struct
+import time
+
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts.sysctl import get_sysctl
+
+
+class DarwinHardware(Hardware):
+ """
+ Darwin-specific subclass of Hardware. Defines memory and CPU facts:
+ - processor
+ - processor_cores
+ - memtotal_mb
+ - memfree_mb
+ - model
+ - osversion
+ - osrevision
+ - uptime_seconds
+ """
+ platform = 'Darwin'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+
+ self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern'])
+ mac_facts = self.get_mac_facts()
+ cpu_facts = self.get_cpu_facts()
+ memory_facts = self.get_memory_facts()
+ uptime_facts = self.get_uptime_facts()
+
+ hardware_facts.update(mac_facts)
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(uptime_facts)
+
+ return hardware_facts
+
+ def get_system_profile(self):
+ rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
+ if rc != 0:
+ return dict()
+ system_profile = dict()
+ for line in out.splitlines():
+ if ': ' in line:
+ (key, value) = line.split(': ', 1)
+ system_profile[key.strip()] = ' '.join(value.strip().split())
+ return system_profile
+
+ def get_mac_facts(self):
+ mac_facts = {}
+ rc, out, err = self.module.run_command("sysctl hw.model")
+ if rc == 0:
+ mac_facts['model'] = mac_facts['product_name'] = out.splitlines()[-1].split()[1]
+ mac_facts['osversion'] = self.sysctl['kern.osversion']
+ mac_facts['osrevision'] = self.sysctl['kern.osrevision']
+
+ return mac_facts
+
+ def get_cpu_facts(self):
+ cpu_facts = {}
+ if 'machdep.cpu.brand_string' in self.sysctl: # Intel
+ cpu_facts['processor'] = self.sysctl['machdep.cpu.brand_string']
+ cpu_facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
+ else: # PowerPC
+ system_profile = self.get_system_profile()
+ cpu_facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
+ cpu_facts['processor_cores'] = self.sysctl['hw.physicalcpu']
+ cpu_facts['processor_vcpus'] = self.sysctl.get('hw.logicalcpu') or self.sysctl.get('hw.ncpu') or ''
+
+ return cpu_facts
+
+ def get_memory_facts(self):
+ memory_facts = {
+ 'memtotal_mb': int(self.sysctl['hw.memsize']) // 1024 // 1024,
+ 'memfree_mb': 0,
+ }
+
+ total_used = 0
+ page_size = 4096
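+        # Note: assumes 4 KiB pages; vm_stat itself prints the page size in its
+        # header, e.g. "Mach Virtual Memory Statistics: (page size of 4096 bytes)".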
+ try:
+ vm_stat_command = get_bin_path('vm_stat')
+ except ValueError:
+ return memory_facts
+
+ rc, out, err = self.module.run_command(vm_stat_command)
+ if rc == 0:
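+            # vm_stat data lines look like (illustrative):
+            #   Pages free:                              100000.
+            #   Pages active:                            200000.
+            #   Pages wired down:                         50000.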
+ # Free = Total - (Wired + active + inactive)
+ # Get a generator of tuples from the command output so we can later
+ # turn it into a dictionary
+ memory_stats = (line.rstrip('.').split(':', 1) for line in out.splitlines())
+
+ # Strip extra left spaces from the value
+ memory_stats = dict((k, v.lstrip()) for k, v in memory_stats)
+
+ for k, v in memory_stats.items():
+ try:
+ memory_stats[k] = int(v)
+ except ValueError:
+ # Most values convert cleanly to integer values but if the field does
+ # not convert to an integer, just leave it alone.
+ pass
+
+ if memory_stats.get('Pages wired down'):
+ total_used += memory_stats['Pages wired down'] * page_size
+ if memory_stats.get('Pages active'):
+ total_used += memory_stats['Pages active'] * page_size
+ if memory_stats.get('Pages inactive'):
+ total_used += memory_stats['Pages inactive'] * page_size
+
+ memory_facts['memfree_mb'] = memory_facts['memtotal_mb'] - (total_used // 1024 // 1024)
+
+ return memory_facts
+
+ def get_uptime_facts(self):
+        # On Darwin, the default kern.boottime output is a human-readable string
+        # (e.g. "{ sec = 1618953642, usec = 0 } ..."), which is annoying to parse.
+        # Use -b to get the raw binary value and decode it.
+ sysctl_cmd = self.module.get_bin_path('sysctl')
+ cmd = [sysctl_cmd, '-b', 'kern.boottime']
+
+ # We need to get raw bytes, not UTF-8.
+ rc, out, err = self.module.run_command(cmd, encoding=None)
+
+        # kern.boottime returns seconds and microseconds as two 64-bit
+        # fields, but we are only interested in the first field.
+ struct_format = '@L'
+ struct_size = struct.calcsize(struct_format)
+ if rc != 0 or len(out) < struct_size:
+ return {}
+
+ (kern_boottime, ) = struct.unpack(struct_format, out[:struct_size])
+
+ return {
+ 'uptime_seconds': int(time.time() - kern_boottime),
+ }
+
+
+class DarwinHardwareCollector(HardwareCollector):
+ _fact_class = DarwinHardware
+ _platform = 'Darwin'
diff --git a/lib/ansible/module_utils/facts/hardware/dragonfly.py b/lib/ansible/module_utils/facts/hardware/dragonfly.py
new file mode 100644
index 0000000..ea24151
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/dragonfly.py
@@ -0,0 +1,26 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.hardware.base import HardwareCollector
+from ansible.module_utils.facts.hardware.freebsd import FreeBSDHardware
+
+
+class DragonFlyHardwareCollector(HardwareCollector):
+    # Note: this reuses the FreeBSD fact class; there is no DragonFly-specific hardware fact class
+ _fact_class = FreeBSDHardware
+ _platform = 'DragonFly'
diff --git a/lib/ansible/module_utils/facts/hardware/freebsd.py b/lib/ansible/module_utils/facts/hardware/freebsd.py
new file mode 100644
index 0000000..cce2ab2
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/freebsd.py
@@ -0,0 +1,241 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import json
+import re
+import struct
+import time
+
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts.timeout import TimeoutError, timeout
+
+from ansible.module_utils.facts.utils import get_file_content, get_mount_size
+
+
+class FreeBSDHardware(Hardware):
+ """
+ FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor (a list)
+ - processor_cores
+ - processor_count
+ - devices
+ - uptime_seconds
+ """
+ platform = 'FreeBSD'
+ DMESG_BOOT = '/var/run/dmesg.boot'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+
+ cpu_facts = self.get_cpu_facts()
+ memory_facts = self.get_memory_facts()
+ uptime_facts = self.get_uptime_facts()
+ dmi_facts = self.get_dmi_facts()
+ device_facts = self.get_device_facts()
+
+ mount_facts = {}
+ try:
+ mount_facts = self.get_mount_facts()
+ except TimeoutError:
+ pass
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(uptime_facts)
+ hardware_facts.update(dmi_facts)
+ hardware_facts.update(device_facts)
+ hardware_facts.update(mount_facts)
+
+ return hardware_facts
+
+ def get_cpu_facts(self):
+ cpu_facts = {}
+ cpu_facts['processor'] = []
+ sysctl = self.module.get_bin_path('sysctl')
+ if sysctl:
+ rc, out, err = self.module.run_command("%s -n hw.ncpu" % sysctl, check_rc=False)
+ cpu_facts['processor_count'] = out.strip()
+
+ dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
+ if not dmesg_boot:
+ try:
+ rc, dmesg_boot, err = self.module.run_command(self.module.get_bin_path("dmesg"), check_rc=False)
+ except Exception:
+ dmesg_boot = ''
+
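+        # Lines of interest in the boot dmesg look like (illustrative):
+        #   CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz (2500.05-MHz K8-class CPU)
+        #   Logical CPUs per core: 2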
+ for line in dmesg_boot.splitlines():
+ if 'CPU:' in line:
+ cpu = re.sub(r'CPU:\s+', r"", line)
+ cpu_facts['processor'].append(cpu.strip())
+ if 'Logical CPUs per core' in line:
+ cpu_facts['processor_cores'] = line.split()[4]
+
+ return cpu_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+
+ sysctl = self.module.get_bin_path('sysctl')
+ if sysctl:
+ rc, out, err = self.module.run_command("%s vm.stats" % sysctl, check_rc=False)
+ for line in out.splitlines():
+ data = line.split()
+ if 'vm.stats.vm.v_page_size' in line:
+ pagesize = int(data[1])
+ if 'vm.stats.vm.v_page_count' in line:
+ pagecount = int(data[1])
+ if 'vm.stats.vm.v_free_count' in line:
+ freecount = int(data[1])
+ memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
+ memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
+
+ swapinfo = self.module.get_bin_path('swapinfo')
+ if swapinfo:
+            # Get swapinfo. With -k, swapinfo output looks like:
+            # Device          1K-blocks     Used    Avail Capacity
+            # /dev/ada0p3        314368        0   314368     0%
+ #
+ rc, out, err = self.module.run_command("%s -k" % swapinfo)
+ lines = out.splitlines()
+ if len(lines[-1]) == 0:
+ lines.pop()
+ data = lines[-1].split()
+ if data[0] != 'Device':
+ memory_facts['swaptotal_mb'] = int(data[1]) // 1024
+ memory_facts['swapfree_mb'] = int(data[3]) // 1024
+
+ return memory_facts
+
+ def get_uptime_facts(self):
+        # On FreeBSD, the default kern.boottime output is a human-readable string,
+        # which is annoying to parse. Use -b to get the raw binary value and decode it.
+ sysctl_cmd = self.module.get_bin_path('sysctl')
+ cmd = [sysctl_cmd, '-b', 'kern.boottime']
+
+ # We need to get raw bytes, not UTF-8.
+ rc, out, err = self.module.run_command(cmd, encoding=None)
+
+        # kern.boottime returns seconds and microseconds as two 64-bit
+        # fields, but we are only interested in the first field.
+ struct_format = '@L'
+ struct_size = struct.calcsize(struct_format)
+ if rc != 0 or len(out) < struct_size:
+ return {}
+
+ (kern_boottime, ) = struct.unpack(struct_format, out[:struct_size])
+
+ return {
+ 'uptime_seconds': int(time.time() - kern_boottime),
+ }
+
+ @timeout()
+ def get_mount_facts(self):
+ mount_facts = {}
+
+ mount_facts['mounts'] = []
+ fstab = get_file_content('/etc/fstab')
+ if fstab:
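+            # /etc/fstab entries look like (illustrative):
+            #   /dev/ada0p2  /  ufs  rw  1  1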
+ for line in fstab.splitlines():
+ if line.startswith('#') or line.strip() == '':
+ continue
+ fields = re.sub(r'\s+', ' ', line).split()
+ mount_statvfs_info = get_mount_size(fields[1])
+ mount_info = {'mount': fields[1],
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[3]}
+ mount_info.update(mount_statvfs_info)
+ mount_facts['mounts'].append(mount_info)
+
+ return mount_facts
+
+ def get_device_facts(self):
+ device_facts = {}
+
+ sysdir = '/dev'
+ device_facts['devices'] = {}
+ drives = re.compile(r'(ada?\d+|da\d+|a?cd\d+)') # TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
+ slices = re.compile(r'(ada?\d+s\d+\w*|da\d+s\d+\w*)')
+ if os.path.isdir(sysdir):
+ dirlist = sorted(os.listdir(sysdir))
+ for device in dirlist:
+ d = drives.match(device)
+                if d and d.group(1) not in device_facts['devices']:
+                    # avoid re-initializing (and emptying) the slice list when a slice
+                    # name such as 'ada0s1' also matches the drive pattern as 'ada0'
+                    device_facts['devices'][d.group(1)] = []
+ s = slices.match(device)
+ if s:
+ device_facts['devices'][d.group(1)].append(s.group(1))
+
+ return device_facts
+
+ def get_dmi_facts(self):
+ ''' learn dmi facts from system
+
+ Use dmidecode executable if available'''
+
+ dmi_facts = {}
+
+        # Use the dmidecode executable, if available
+ dmi_bin = self.module.get_bin_path('dmidecode')
+ DMI_DICT = {
+ 'bios_date': 'bios-release-date',
+ 'bios_vendor': 'bios-vendor',
+ 'bios_version': 'bios-version',
+ 'board_asset_tag': 'baseboard-asset-tag',
+ 'board_name': 'baseboard-product-name',
+ 'board_serial': 'baseboard-serial-number',
+ 'board_vendor': 'baseboard-manufacturer',
+ 'board_version': 'baseboard-version',
+ 'chassis_asset_tag': 'chassis-asset-tag',
+ 'chassis_serial': 'chassis-serial-number',
+ 'chassis_vendor': 'chassis-manufacturer',
+ 'chassis_version': 'chassis-version',
+ 'form_factor': 'chassis-type',
+ 'product_name': 'system-product-name',
+ 'product_serial': 'system-serial-number',
+ 'product_uuid': 'system-uuid',
+ 'product_version': 'system-version',
+ 'system_vendor': 'system-manufacturer',
+ }
+ for (k, v) in DMI_DICT.items():
+ if dmi_bin is not None:
+ (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
+ if rc == 0:
+ # Strip out commented lines (specific dmidecode output)
+ # FIXME: why add the fact and then test if it is json?
+ dmi_facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#')])
+ try:
+ json.dumps(dmi_facts[k])
+ except UnicodeDecodeError:
+ dmi_facts[k] = 'NA'
+ else:
+ dmi_facts[k] = 'NA'
+ else:
+ dmi_facts[k] = 'NA'
+
+ return dmi_facts
+
+
+class FreeBSDHardwareCollector(HardwareCollector):
+ _fact_class = FreeBSDHardware
+ _platform = 'FreeBSD'
diff --git a/lib/ansible/module_utils/facts/hardware/hpux.py b/lib/ansible/module_utils/facts/hardware/hpux.py
new file mode 100644
index 0000000..ae72ed8
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/hpux.py
@@ -0,0 +1,165 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+
+
+class HPUXHardware(Hardware):
+ """
+ HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor
+ - processor_cores
+ - processor_count
+ - model
+ - firmware
+ """
+
+ platform = 'HP-UX'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+
+ cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
+ memory_facts = self.get_memory_facts()
+ hw_facts = self.get_hw_facts()
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(hw_facts)
+
+ return hardware_facts
+
+ def get_cpu_facts(self, collected_facts=None):
+ cpu_facts = {}
+ collected_facts = collected_facts or {}
+
+ if collected_facts.get('ansible_architecture') in ['9000/800', '9000/785']:
+ rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
+ cpu_facts['processor_count'] = int(out.strip())
+ # Working with machinfo mess
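+        # machinfo output differs between releases; e.g. (illustrative):
+        #   B.11.23: 'Number of CPUs = 2'
+        #   B.11.31: separate 'socket', 'core' and 'logical processor' lines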
+ elif collected_facts.get('ansible_architecture') == 'ia64':
+ if collected_facts.get('ansible_distribution_version') == "B.11.23":
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
+ if out:
+ cpu_facts['processor_count'] = int(out.strip().split('=')[1])
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
+ if out:
+ cpu_facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
+ rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
+ cpu_facts['processor_cores'] = int(out.strip())
+ if collected_facts.get('ansible_distribution_version') == "B.11.31":
+                # machinfo only reports 'core' strings on B.11.31 releases newer
+                # than 1204; otherwise fall back to counting 'Intel' lines
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
+ if out.strip() == '0':
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
+ cpu_facts['processor_count'] = int(out.strip().split(" ")[0])
+                    # If hyperthreading is active, halve the logical-processor count to get cores
+ rc, out, err = self.module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
+ data = re.sub(' +', ' ', out).strip().split(' ')
+ if len(data) == 1:
+ hyperthreading = 'OFF'
+ else:
+ hyperthreading = data[1]
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
+ data = out.strip().split(" ")
+ if hyperthreading == 'ON':
+                        cpu_facts['processor_cores'] = int(data[0]) // 2
+ else:
+ if len(data) == 1:
+ cpu_facts['processor_cores'] = cpu_facts['processor_count']
+ else:
+ cpu_facts['processor_cores'] = int(data[0])
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
+ cpu_facts['processor'] = out.strip()
+ else:
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
+ cpu_facts['processor_count'] = int(out.strip().split(" ")[0])
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
+ cpu_facts['processor_cores'] = int(out.strip().split(" ")[0])
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
+ cpu_facts['processor'] = out.strip()
+
+ return cpu_facts
+
+ def get_memory_facts(self, collected_facts=None):
+ memory_facts = {}
+ collected_facts = collected_facts or {}
+
+ pagesize = 4096
+ rc, out, err = self.module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
+ data = int(re.sub(' +', ' ', out).split(' ')[5].strip())
+ memory_facts['memfree_mb'] = pagesize * data // 1024 // 1024
+ if collected_facts.get('ansible_architecture') in ['9000/800', '9000/785']:
+ try:
+ rc, out, err = self.module.run_command("grep Physical /var/adm/syslog/syslog.log")
+ data = re.search('.*Physical: ([0-9]*) Kbytes.*', out).groups()[0].strip()
+ memory_facts['memtotal_mb'] = int(data) // 1024
+ except AttributeError:
+ # For systems where memory details aren't sent to syslog or the log has rotated, use parsed
+ # adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
+ if os.access("/dev/kmem", os.R_OK):
+ rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'",
+ use_unsafe_shell=True)
+ if not err:
+ data = out
+                        memory_facts['memtotal_mb'] = int(data) // 256  # 4 KiB pages -> MiB
+ else:
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
+ data = re.search(r'Memory[\ :=]*([0-9]*).*MB.*', out).groups()[0].strip()
+ memory_facts['memtotal_mb'] = int(data)
+ rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f -q")
+ memory_facts['swaptotal_mb'] = int(out.strip())
+ rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
+ swap = 0
+ for line in out.strip().splitlines():
+ swap += int(re.sub(' +', ' ', line).split(' ')[3].strip())
+ memory_facts['swapfree_mb'] = swap
+
+ return memory_facts
+
+ def get_hw_facts(self, collected_facts=None):
+ hw_facts = {}
+ collected_facts = collected_facts or {}
+
+ rc, out, err = self.module.run_command("model")
+ hw_facts['model'] = out.strip()
+ if collected_facts.get('ansible_architecture') == 'ia64':
+ separator = ':'
+ if collected_facts.get('ansible_distribution_version') == "B.11.23":
+ separator = '='
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
+ hw_facts['firmware_version'] = out.split(separator)[1].strip()
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Machine serial number' ", use_unsafe_shell=True)
+ if rc == 0 and out:
+ hw_facts['product_serial'] = out.split(separator)[1].strip()
+
+ return hw_facts
+
+
+class HPUXHardwareCollector(HardwareCollector):
+ _fact_class = HPUXHardware
+ _platform = 'HP-UX'
+
+ required_facts = set(['platform', 'distribution'])
diff --git a/lib/ansible/module_utils/facts/hardware/hurd.py b/lib/ansible/module_utils/facts/hardware/hurd.py
new file mode 100644
index 0000000..306e13c
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/hurd.py
@@ -0,0 +1,53 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.timeout import TimeoutError
+from ansible.module_utils.facts.hardware.base import HardwareCollector
+from ansible.module_utils.facts.hardware.linux import LinuxHardware
+
+
+class HurdHardware(LinuxHardware):
+ """
+    GNU Hurd-specific subclass of Hardware. Defines memory and mount facts
+    based on the procfs compatibility translator, which mimics the interface
+    of the Linux kernel.
+ """
+
+ platform = 'GNU'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+ uptime_facts = self.get_uptime_facts()
+ memory_facts = self.get_memory_facts()
+
+ mount_facts = {}
+ try:
+ mount_facts = self.get_mount_facts()
+ except TimeoutError:
+ pass
+
+ hardware_facts.update(uptime_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(mount_facts)
+
+ return hardware_facts
+
+
+class HurdHardwareCollector(HardwareCollector):
+ _fact_class = HurdHardware
+ _platform = 'GNU'
diff --git a/lib/ansible/module_utils/facts/hardware/linux.py b/lib/ansible/module_utils/facts/hardware/linux.py
new file mode 100644
index 0000000..c0ca33d
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/linux.py
@@ -0,0 +1,869 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import errno
+import glob
+import json
+import os
+import re
+import sys
+import time
+
+from multiprocessing import cpu_count
+from multiprocessing.pool import ThreadPool
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.formatters import bytes_to_human
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size
+from ansible.module_utils.six import iteritems
+
+# import this as a module to ensure we get the same module instance
+from ansible.module_utils.facts import timeout
+
+
+def get_partition_uuid(partname):
+ try:
+ uuids = os.listdir("/dev/disk/by-uuid")
+ except OSError:
+ return
+
+ for uuid in uuids:
+ dev = os.path.realpath("/dev/disk/by-uuid/" + uuid)
+ if dev == ("/dev/" + partname):
+ return uuid
+
+ return None
+
+
+class LinuxHardware(Hardware):
+ """
+ Linux-specific subclass of Hardware. Defines memory and CPU facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor (a list)
+ - processor_cores
+ - processor_count
+
+    In addition, it also defines a number of DMI and device facts.
+ """
+
+ platform = 'Linux'
+
+    # Originally only had these four as top-level facts
+ ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
+ # Now we have all of these in a dict structure
+ MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
+
+ # regex used against findmnt output to detect bind mounts
+ BIND_MOUNT_RE = re.compile(r'.*\]')
+
+ # regex used against mtab content to find entries that are bind mounts
+ MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"')
+
+ # regex used for replacing octal escape sequences
+ OCTAL_ESCAPE_RE = re.compile(r'\\[0-9]{3}')
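+    # e.g. a mount point stored as "/mnt/foo\040bar" in mtab decodes to "/mnt/foo bar"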
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+ locale = get_best_parsable_locale(self.module)
+ self.module.run_command_environ_update = {'LANG': locale, 'LC_ALL': locale, 'LC_NUMERIC': locale}
+
+ cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
+ memory_facts = self.get_memory_facts()
+ dmi_facts = self.get_dmi_facts()
+ device_facts = self.get_device_facts()
+ uptime_facts = self.get_uptime_facts()
+ lvm_facts = self.get_lvm_facts()
+
+ mount_facts = {}
+ try:
+ mount_facts = self.get_mount_facts()
+ except timeout.TimeoutError:
+ self.module.warn("No mount facts were gathered due to timeout.")
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(dmi_facts)
+ hardware_facts.update(device_facts)
+ hardware_facts.update(uptime_facts)
+ hardware_facts.update(lvm_facts)
+ hardware_facts.update(mount_facts)
+
+ return hardware_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+ if not os.access("/proc/meminfo", os.R_OK):
+ return memory_facts
+
+ memstats = {}
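+        # /proc/meminfo lines look like (illustrative):
+        #   MemTotal:       16304368 kB
+        #   SwapTotal:       2097148 kB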
+ for line in get_file_lines("/proc/meminfo"):
+ data = line.split(":", 1)
+ key = data[0]
+ if key in self.ORIGINAL_MEMORY_FACTS:
+ val = data[1].strip().split(' ')[0]
+ memory_facts["%s_mb" % key.lower()] = int(val) // 1024
+
+ if key in self.MEMORY_FACTS:
+ val = data[1].strip().split(' ')[0]
+ memstats[key.lower()] = int(val) // 1024
+
+ if None not in (memstats.get('memtotal'), memstats.get('memfree')):
+ memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
+ if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
+ memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
+ if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
+ memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
+ if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
+ memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
+
+ memory_facts['memory_mb'] = {
+ 'real': {
+ 'total': memstats.get('memtotal'),
+ 'used': memstats.get('real:used'),
+ 'free': memstats.get('memfree'),
+ },
+ 'nocache': {
+ 'free': memstats.get('nocache:free'),
+ 'used': memstats.get('nocache:used'),
+ },
+ 'swap': {
+ 'total': memstats.get('swaptotal'),
+ 'free': memstats.get('swapfree'),
+ 'used': memstats.get('swap:used'),
+ 'cached': memstats.get('swapcached'),
+ },
+ }
+
+ return memory_facts
+
+ def get_cpu_facts(self, collected_facts=None):
+ cpu_facts = {}
+ collected_facts = collected_facts or {}
+
+ i = 0
+ vendor_id_occurrence = 0
+ model_name_occurrence = 0
+ processor_occurrence = 0
+ physid = 0
+ coreid = 0
+ sockets = {}
+ cores = {}
+
+ xen = False
+ xen_paravirt = False
+ try:
+ if os.path.exists('/proc/xen'):
+ xen = True
+ else:
+ for line in get_file_lines('/sys/hypervisor/type'):
+ if line.strip() == 'xen':
+ xen = True
+ # Only interested in the first line
+ break
+ except IOError:
+ pass
+
+ if not os.access("/proc/cpuinfo", os.R_OK):
+ return cpu_facts
+
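+        # A /proc/cpuinfo stanza looks like (illustrative, x86):
+        #   processor       : 0
+        #   vendor_id       : GenuineIntel
+        #   model name      : Intel(R) Core(TM) i7-8550U CPU @ 1.80GHz
+        #   physical id     : 0
+        #   core id         : 0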
+ cpu_facts['processor'] = []
+ for line in get_file_lines('/proc/cpuinfo'):
+ data = line.split(":", 1)
+ key = data[0].strip()
+
+ try:
+ val = data[1].strip()
+ except IndexError:
+ val = ""
+
+ if xen:
+ if key == 'flags':
+ # Check for vme cpu flag, Xen paravirt does not expose this.
+ # Need to detect Xen paravirt because it exposes cpuinfo
+ # differently than Xen HVM or KVM and causes reporting of
+ # only a single cpu core.
+ if 'vme' not in val:
+ xen_paravirt = True
+
+ # model name is for Intel arch, Processor (mind the uppercase P)
+ # works for some ARM devices, like the Sheevaplug.
+            # 'ncpus active' is a SPARC attribute
+ if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor', 'processor']:
+ if 'processor' not in cpu_facts:
+ cpu_facts['processor'] = []
+ cpu_facts['processor'].append(val)
+ if key == 'vendor_id':
+ vendor_id_occurrence += 1
+ if key == 'model name':
+ model_name_occurrence += 1
+ if key == 'processor':
+ processor_occurrence += 1
+ i += 1
+ elif key == 'physical id':
+ physid = val
+ if physid not in sockets:
+ sockets[physid] = 1
+ elif key == 'core id':
+ coreid = val
+ if coreid not in sockets:
+ cores[coreid] = 1
+ elif key == 'cpu cores':
+ sockets[physid] = int(val)
+ elif key == 'siblings':
+ cores[coreid] = int(val)
+ elif key == '# processors':
+ cpu_facts['processor_cores'] = int(val)
+ elif key == 'ncpus active':
+ i = int(val)
+
+        # Skip for platforms without vendor_id/model_name in cpuinfo (e.g. ppc64le)
+ if vendor_id_occurrence > 0:
+ if vendor_id_occurrence == model_name_occurrence:
+ i = vendor_id_occurrence
+
+ # The fields for ARM CPUs do not always include 'vendor_id' or 'model name',
+ # and sometimes includes both 'processor' and 'Processor'.
+ # The fields for Power CPUs include 'processor' and 'cpu'.
+ # Always use 'processor' count for ARM and Power systems
+ if collected_facts.get('ansible_architecture', '').startswith(('armv', 'aarch', 'ppc')):
+ i = processor_occurrence
+
+ # FIXME
+ if collected_facts.get('ansible_architecture') != 's390x':
+ if xen_paravirt:
+ cpu_facts['processor_count'] = i
+ cpu_facts['processor_cores'] = i
+ cpu_facts['processor_threads_per_core'] = 1
+ cpu_facts['processor_vcpus'] = i
+ else:
+ if sockets:
+ cpu_facts['processor_count'] = len(sockets)
+ else:
+ cpu_facts['processor_count'] = i
+
+ socket_values = list(sockets.values())
+ if socket_values and socket_values[0]:
+ cpu_facts['processor_cores'] = socket_values[0]
+ else:
+ cpu_facts['processor_cores'] = 1
+
+ core_values = list(cores.values())
+ if core_values:
+ cpu_facts['processor_threads_per_core'] = core_values[0] // cpu_facts['processor_cores']
+ else:
+                    # no sibling info available; default to one thread per core
+                    cpu_facts['processor_threads_per_core'] = 1
+
+ cpu_facts['processor_vcpus'] = (cpu_facts['processor_threads_per_core'] *
+ cpu_facts['processor_count'] * cpu_facts['processor_cores'])
+
+ # if the number of processors available to the module's
+ # thread cannot be determined, the processor count
+ # reported by /proc will be the default:
+ cpu_facts['processor_nproc'] = processor_occurrence
+
+ try:
+ cpu_facts['processor_nproc'] = len(
+ os.sched_getaffinity(0)
+ )
+ except AttributeError:
+ # In Python < 3.3, os.sched_getaffinity() is not available
+ try:
+ cmd = get_bin_path('nproc')
+ except ValueError:
+ pass
+ else:
+ rc, out, _err = self.module.run_command(cmd)
+ if rc == 0:
+ cpu_facts['processor_nproc'] = int(out)
+
+ return cpu_facts
+
+ def get_dmi_facts(self):
+ ''' learn dmi facts from system
+
+ Try /sys first for dmi related facts.
+ If that is not available, fall back to dmidecode executable '''
+
+ dmi_facts = {}
+
+ if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
+ # Use kernel DMI info, if available
+
+ # DMI SPEC -- https://www.dmtf.org/sites/default/files/standards/documents/DSP0134_3.2.0.pdf
+ FORM_FACTOR = ["Unknown", "Other", "Unknown", "Desktop",
+ "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
+ "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
+ "All In One", "Sub Notebook", "Space-saving", "Lunch Box",
+ "Main Server Chassis", "Expansion Chassis", "Sub Chassis",
+ "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
+ "Rack Mount Chassis", "Sealed-case PC", "Multi-system",
+ "CompactPCI", "AdvancedTCA", "Blade", "Blade Enclosure",
+ "Tablet", "Convertible", "Detachable", "IoT Gateway",
+ "Embedded PC", "Mini PC", "Stick PC"]
+
+ DMI_DICT = {
+ 'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
+ 'bios_vendor': '/sys/devices/virtual/dmi/id/bios_vendor',
+ 'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
+ 'board_asset_tag': '/sys/devices/virtual/dmi/id/board_asset_tag',
+ 'board_name': '/sys/devices/virtual/dmi/id/board_name',
+ 'board_serial': '/sys/devices/virtual/dmi/id/board_serial',
+ 'board_vendor': '/sys/devices/virtual/dmi/id/board_vendor',
+ 'board_version': '/sys/devices/virtual/dmi/id/board_version',
+ 'chassis_asset_tag': '/sys/devices/virtual/dmi/id/chassis_asset_tag',
+ 'chassis_serial': '/sys/devices/virtual/dmi/id/chassis_serial',
+ 'chassis_vendor': '/sys/devices/virtual/dmi/id/chassis_vendor',
+ 'chassis_version': '/sys/devices/virtual/dmi/id/chassis_version',
+ 'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
+ 'product_name': '/sys/devices/virtual/dmi/id/product_name',
+ 'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
+ 'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
+ 'product_version': '/sys/devices/virtual/dmi/id/product_version',
+ 'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor',
+ }
+
+ for (key, path) in DMI_DICT.items():
+ data = get_file_content(path)
+ if data is not None:
+ if key == 'form_factor':
+ try:
+ dmi_facts['form_factor'] = FORM_FACTOR[int(data)]
+ except IndexError:
+ dmi_facts['form_factor'] = 'unknown (%s)' % data
+ else:
+ dmi_facts[key] = data
+ else:
+ dmi_facts[key] = 'NA'
+
+ else:
+ # Fall back to using dmidecode, if available
+ dmi_bin = self.module.get_bin_path('dmidecode')
+ DMI_DICT = {
+ 'bios_date': 'bios-release-date',
+ 'bios_vendor': 'bios-vendor',
+ 'bios_version': 'bios-version',
+ 'board_asset_tag': 'baseboard-asset-tag',
+ 'board_name': 'baseboard-product-name',
+ 'board_serial': 'baseboard-serial-number',
+ 'board_vendor': 'baseboard-manufacturer',
+ 'board_version': 'baseboard-version',
+ 'chassis_asset_tag': 'chassis-asset-tag',
+ 'chassis_serial': 'chassis-serial-number',
+ 'chassis_vendor': 'chassis-manufacturer',
+ 'chassis_version': 'chassis-version',
+ 'form_factor': 'chassis-type',
+ 'product_name': 'system-product-name',
+ 'product_serial': 'system-serial-number',
+ 'product_uuid': 'system-uuid',
+ 'product_version': 'system-version',
+ 'system_vendor': 'system-manufacturer',
+ }
+ for (k, v) in DMI_DICT.items():
+ if dmi_bin is not None:
+ (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
+ if rc == 0:
+ # Strip out commented lines (specific dmidecode output)
+ thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')])
+ try:
+ json.dumps(thisvalue)
+ except UnicodeDecodeError:
+ thisvalue = "NA"
+
+ dmi_facts[k] = thisvalue
+ else:
+ dmi_facts[k] = 'NA'
+ else:
+ dmi_facts[k] = 'NA'
+
+ return dmi_facts
+
+ def _run_lsblk(self, lsblk_path):
+ # call lsblk and collect all uuids
+        # --exclude 2 makes lsblk ignore floppy disks, which can take longer to
+        # answer than the typical fact-gathering timeout
+ # this uses the linux major device number
+ # for details see https://www.kernel.org/doc/Documentation/devices.txt
+ args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2']
+ cmd = [lsblk_path] + args
+ rc, out, err = self.module.run_command(cmd)
+ return rc, out, err
+
+ def _lsblk_uuid(self):
+ uuids = {}
+ lsblk_path = self.module.get_bin_path("lsblk")
+ if not lsblk_path:
+ return uuids
+
+ rc, out, err = self._run_lsblk(lsblk_path)
+ if rc != 0:
+ return uuids
+
+ # each line will be in format:
+ # <devicename><some whitespace><uuid>
+ # /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
+ for lsblk_line in out.splitlines():
+ if not lsblk_line:
+ continue
+
+ line = lsblk_line.strip()
+ fields = line.rsplit(None, 1)
+
+ if len(fields) < 2:
+ continue
+
+ device_name, uuid = fields[0].strip(), fields[1].strip()
+ if device_name in uuids:
+ continue
+ uuids[device_name] = uuid
+
+ return uuids
+
+ def _udevadm_uuid(self, device):
+ # fallback for versions of lsblk <= 2.23 that don't have --paths, see _run_lsblk() above
+ uuid = 'N/A'
+
+ udevadm_path = self.module.get_bin_path('udevadm')
+ if not udevadm_path:
+ return uuid
+
+ cmd = [udevadm_path, 'info', '--query', 'property', '--name', device]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ return uuid
+
+ # a snippet of the output of the udevadm command below will be:
+ # ...
+ # ID_FS_TYPE=ext4
+ # ID_FS_USAGE=filesystem
+ # ID_FS_UUID=57b1a3e7-9019-4747-9809-7ec52bba9179
+ # ...
+ m = re.search('ID_FS_UUID=(.*)\n', out)
+ if m:
+ uuid = m.group(1)
+
+ return uuid
+
+ def _run_findmnt(self, findmnt_path):
+ args = ['--list', '--noheadings', '--notruncate']
+ cmd = [findmnt_path] + args
+ rc, out, err = self.module.run_command(cmd, errors='surrogate_then_replace')
+ return rc, out, err
+
+ def _find_bind_mounts(self):
+ bind_mounts = set()
+ findmnt_path = self.module.get_bin_path("findmnt")
+ if not findmnt_path:
+ return bind_mounts
+
+ rc, out, err = self._run_findmnt(findmnt_path)
+ if rc != 0:
+ return bind_mounts
+
+ # find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
+ for line in out.splitlines():
+ fields = line.split()
+ # fields[0] is the TARGET, fields[1] is the SOURCE
+ if len(fields) < 2:
+ continue
+
+ # bind mounts will have a [/directory_name] in the SOURCE column
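+            # e.g. (illustrative): /mnt/bound  /dev/sda1[/srv/data]  ext4  rw,relatime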
+ if self.BIND_MOUNT_RE.match(fields[1]):
+ bind_mounts.add(fields[0])
+
+ return bind_mounts
+
+ def _mtab_entries(self):
+ mtab_file = '/etc/mtab'
+ if not os.path.exists(mtab_file):
+ mtab_file = '/proc/mounts'
+
+ mtab = get_file_content(mtab_file, '')
+ mtab_entries = []
+ for line in mtab.splitlines():
+ fields = line.split()
+ if len(fields) < 4:
+ continue
+ mtab_entries.append(fields)
+ return mtab_entries
+
+ @staticmethod
+ def _replace_octal_escapes_helper(match):
+ # Convert to integer using base8 and then convert to character
+ return chr(int(match.group()[1:], 8))
+
+ def _replace_octal_escapes(self, value):
+ return self.OCTAL_ESCAPE_RE.sub(self._replace_octal_escapes_helper, value)
+
+ def get_mount_info(self, mount, device, uuids):
+
+ mount_size = get_mount_size(mount)
+
+ # _udevadm_uuid is a fallback for versions of lsblk <= 2.23 that don't have --paths
+ # see _run_lsblk() above
+ # https://github.com/ansible/ansible/issues/36077
+ uuid = uuids.get(device, self._udevadm_uuid(device))
+
+ return mount_size, uuid
+
+ def get_mount_facts(self):
+
+ mounts = []
+
+ # gather system lists
+ bind_mounts = self._find_bind_mounts()
+ uuids = self._lsblk_uuid()
+ mtab_entries = self._mtab_entries()
+
+ # start threads to query each mount
+ results = {}
+ pool = ThreadPool(processes=min(len(mtab_entries), cpu_count()))
+ maxtime = globals().get('GATHER_TIMEOUT') or timeout.DEFAULT_GATHER_TIMEOUT
+ for fields in mtab_entries:
+ # Transform octal escape sequences
+ fields = [self._replace_octal_escapes(field) for field in fields]
+
+ device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]
+
+            if (not device.startswith(('/', '\\')) and ':/' not in device) or fstype == 'none':
+ continue
+
+ mount_info = {'mount': mount,
+ 'device': device,
+ 'fstype': fstype,
+ 'options': options}
+
+ if mount in bind_mounts:
+                # only append ',bind' if it is not already recorded; a plain
+                # /etc/mtab (not a /proc/mounts symlink) lists the bind option itself
+ if not self.MTAB_BIND_MOUNT_RE.match(options):
+ mount_info['options'] += ",bind"
+
+ results[mount] = {'info': mount_info,
+ 'extra': pool.apply_async(self.get_mount_info, (mount, device, uuids)),
+ 'timelimit': time.time() + maxtime}
+
+ pool.close() # done with new workers, start gc
+
+ # wait for workers and get results
+ while results:
+ for mount in list(results):
+ done = False
+ res = results[mount]['extra']
+ try:
+ if res.ready():
+ done = True
+ if res.successful():
+ mount_size, uuid = res.get()
+ if mount_size:
+ results[mount]['info'].update(mount_size)
+ results[mount]['info']['uuid'] = uuid or 'N/A'
+ else:
+                            # failed; ask res.get() why (res.successful() being False
+                            # means the worker raised an exception)
+ results[mount]['info']['note'] = 'Could not get extra information: %s.' % (to_text(res.get()))
+
+ elif time.time() > results[mount]['timelimit']:
+ done = True
+ self.module.warn("Timeout exceeded when getting mount info for %s" % mount)
+ results[mount]['info']['note'] = 'Could not get extra information due to timeout'
+ except Exception as e:
+ import traceback
+ done = True
+ results[mount]['info'] = 'N/A'
+ self.module.warn("Error prevented getting extra info for mount %s: [%s] %s." % (mount, type(e), to_text(e)))
+ self.module.debug(traceback.format_exc())
+
+ if done:
+ # move results outside and make loop only handle pending
+ mounts.append(results[mount]['info'])
+ del results[mount]
+
+            # avoid CPU churn; sleep briefly before re-polling the remaining mounts
+ time.sleep(0.1)
+
+ return {'mounts': mounts}
+
+ def get_device_links(self, link_dir):
+ if not os.path.exists(link_dir):
+ return {}
+ try:
+ retval = collections.defaultdict(set)
+ for entry in os.listdir(link_dir):
+ try:
+ target = os.path.basename(os.readlink(os.path.join(link_dir, entry)))
+ retval[target].add(entry)
+ except OSError:
+ continue
+ return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
+ except OSError:
+ return {}
+
+ def get_all_device_owners(self):
+ try:
+ retval = collections.defaultdict(set)
+ for path in glob.glob('/sys/block/*/slaves/*'):
+ elements = path.split('/')
+ device = elements[3]
+ target = elements[5]
+ retval[target].add(device)
+ return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
+ except OSError:
+ return {}
+
+ def get_all_device_links(self):
+ return {
+ 'ids': self.get_device_links('/dev/disk/by-id'),
+ 'uuids': self.get_device_links('/dev/disk/by-uuid'),
+ 'labels': self.get_device_links('/dev/disk/by-label'),
+ 'masters': self.get_all_device_owners(),
+ }
+
+ def get_holders(self, block_dev_dict, sysdir):
+ block_dev_dict['holders'] = []
+ if os.path.isdir(sysdir + "/holders"):
+ for folder in os.listdir(sysdir + "/holders"):
+ if not folder.startswith("dm-"):
+ continue
+ name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
+ if name:
+ block_dev_dict['holders'].append(name)
+ else:
+ block_dev_dict['holders'].append(folder)
+
+ def _get_sg_inq_serial(self, sg_inq, block):
+ device = "/dev/%s" % (block)
+ rc, drivedata, err = self.module.run_command([sg_inq, device])
+ if rc == 0:
+ serial = re.search(r"(?:Unit serial|Serial) number:\s+(\w+)", drivedata)
+ if serial:
+ return serial.group(1)
+
+ def get_device_facts(self):
+ device_facts = {}
+
+ device_facts['devices'] = {}
+ lspci = self.module.get_bin_path('lspci')
+ if lspci:
+ rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace')
+ else:
+ pcidata = None
+
+ try:
+ block_devs = os.listdir("/sys/block")
+ except OSError:
+ return device_facts
+
+ devs_wwn = {}
+ try:
+ devs_by_id = os.listdir("/dev/disk/by-id")
+ except OSError:
+ pass
+ else:
+ for link_name in devs_by_id:
+ if link_name.startswith("wwn-"):
+ try:
+ wwn_link = os.readlink(os.path.join("/dev/disk/by-id", link_name))
+ except OSError:
+ continue
+ devs_wwn[os.path.basename(wwn_link)] = link_name[4:]
+
+ links = self.get_all_device_links()
+ device_facts['device_links'] = links
+
+ for block in block_devs:
+ virtual = 1
+ sysfs_no_links = 0
+ try:
+ path = os.readlink(os.path.join("/sys/block/", block))
+ except OSError:
+ e = sys.exc_info()[1]
+ if e.errno == errno.EINVAL:
+ path = block
+ sysfs_no_links = 1
+ else:
+ continue
+ sysdir = os.path.join("/sys/block", path)
+ if sysfs_no_links == 1:
+ for folder in os.listdir(sysdir):
+ if "device" in folder:
+ virtual = 0
+ break
+ d = {}
+ d['virtual'] = virtual
+ d['links'] = {}
+ for (link_type, link_values) in iteritems(links):
+ d['links'][link_type] = link_values.get(block, [])
+ diskname = os.path.basename(sysdir)
+ for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
+ d[key] = get_file_content(sysdir + "/device/" + key)
+
+ sg_inq = self.module.get_bin_path('sg_inq')
+
+ # we can get NVMe device's serial number from /sys/block/<name>/device/serial
+ serial_path = "/sys/block/%s/device/serial" % (block)
+
+ if sg_inq:
+ serial = self._get_sg_inq_serial(sg_inq, block)
+ if serial:
+ d['serial'] = serial
+ else:
+ serial = get_file_content(serial_path)
+ if serial:
+ d['serial'] = serial
+
+ for key, test in [('removable', '/removable'),
+ ('support_discard', '/queue/discard_granularity'),
+ ]:
+ d[key] = get_file_content(sysdir + test)
+
+ if diskname in devs_wwn:
+ d['wwn'] = devs_wwn[diskname]
+
+ d['partitions'] = {}
+ for folder in os.listdir(sysdir):
+ m = re.search("(" + diskname + r"[p]?\d+)", folder)
+ if m:
+ part = {}
+ partname = m.group(1)
+ part_sysdir = sysdir + "/" + partname
+
+ part['links'] = {}
+ for (link_type, link_values) in iteritems(links):
+ part['links'][link_type] = link_values.get(partname, [])
+
+ part['start'] = get_file_content(part_sysdir + "/start", 0)
+ part['sectors'] = get_file_content(part_sysdir + "/size", 0)
+
+ part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
+ if not part['sectorsize']:
+ part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512)
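+                    # note: /sys start/size counts are in 512-byte units regardless of the logical block size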
+ part['size'] = bytes_to_human((float(part['sectors']) * 512.0))
+ part['uuid'] = get_partition_uuid(partname)
+ self.get_holders(part, part_sysdir)
+
+ d['partitions'][partname] = part
+
+ d['rotational'] = get_file_content(sysdir + "/queue/rotational")
+ d['scheduler_mode'] = ""
+ scheduler = get_file_content(sysdir + "/queue/scheduler")
+ if scheduler is not None:
+ m = re.match(r".*?(\[(.*)\])", scheduler)
+ if m:
+ d['scheduler_mode'] = m.group(2)
+
+ d['sectors'] = get_file_content(sysdir + "/size")
+ if not d['sectors']:
+ d['sectors'] = 0
+ d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
+ if not d['sectorsize']:
+ d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512)
+ d['size'] = bytes_to_human(float(d['sectors']) * 512.0)
+
+ d['host'] = ""
+
+ # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
+ m = re.match(r".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
+ if m and pcidata:
+ pciid = m.group(1)
+ did = re.escape(pciid)
+ m = re.search("^" + did + r"\s(.*)$", pcidata, re.MULTILINE)
+ if m:
+ d['host'] = m.group(1)
+
+ self.get_holders(d, sysdir)
+
+ device_facts['devices'][diskname] = d
+
+ return device_facts
+
+ def get_uptime_facts(self):
+ uptime_facts = {}
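+        # /proc/uptime looks like (illustrative): "354862.04 1282315.46"
+        # (seconds since boot, then aggregate idle seconds)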
+ uptime_file_content = get_file_content('/proc/uptime')
+ if uptime_file_content:
+ uptime_seconds_string = uptime_file_content.split(' ')[0]
+ uptime_facts['uptime_seconds'] = int(float(uptime_seconds_string))
+
+ return uptime_facts
+
+ def _find_mapper_device_name(self, dm_device):
+ dm_prefix = '/dev/dm-'
+ mapper_device = dm_device
+ if dm_device.startswith(dm_prefix):
+ dmsetup_cmd = self.module.get_bin_path('dmsetup', True)
+ mapper_prefix = '/dev/mapper/'
+ rc, dm_name, err = self.module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
+ if rc == 0:
+ mapper_device = mapper_prefix + dm_name.rstrip()
+ return mapper_device
+
+ def get_lvm_facts(self):
+ """ Get LVM Facts if running as root and lvm utils are available """
+
+ lvm_facts = {'lvm': 'N/A'}
+
+ if os.getuid() == 0 and self.module.get_bin_path('vgs'):
+ lvm_util_options = '--noheadings --nosuffix --units g --separator ,'
+
+ vgs_path = self.module.get_bin_path('vgs')
+ # vgs fields: VG #PV #LV #SN Attr VSize VFree
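+            #   e.g. with the options above (illustrative): vg00,1,2,0,wz--n-,100.00,20.00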
+ vgs = {}
+ if vgs_path:
+ rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options))
+ for vg_line in vg_lines.splitlines():
+ items = vg_line.strip().split(',')
+ vgs[items[0]] = {'size_g': items[-2],
+ 'free_g': items[-1],
+ 'num_lvs': items[2],
+ 'num_pvs': items[1]}
+
+ lvs_path = self.module.get_bin_path('lvs')
+ # lvs fields:
+ # LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
+ lvs = {}
+ if lvs_path:
+ rc, lv_lines, err = self.module.run_command('%s %s' % (lvs_path, lvm_util_options))
+ for lv_line in lv_lines.splitlines():
+ items = lv_line.strip().split(',')
+ lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
+
+ pvs_path = self.module.get_bin_path('pvs')
+ # pvs fields: PV VG #Fmt #Attr PSize PFree
+ pvs = {}
+ if pvs_path:
+ rc, pv_lines, err = self.module.run_command('%s %s' % (pvs_path, lvm_util_options))
+ for pv_line in pv_lines.splitlines():
+ items = pv_line.strip().split(',')
+ pvs[self._find_mapper_device_name(items[0])] = {
+ 'size_g': items[4],
+ 'free_g': items[5],
+ 'vg': items[1]}
+
+ lvm_facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs}
+
+ return lvm_facts
+
+
+class LinuxHardwareCollector(HardwareCollector):
+ _platform = 'Linux'
+ _fact_class = LinuxHardware
+
+ required_facts = set(['platform'])
diff --git a/lib/ansible/module_utils/facts/hardware/netbsd.py b/lib/ansible/module_utils/facts/hardware/netbsd.py
new file mode 100644
index 0000000..c6557aa
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/netbsd.py
@@ -0,0 +1,184 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import time
+
+from ansible.module_utils.six.moves import reduce
+
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts.timeout import TimeoutError, timeout
+
+from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size
+from ansible.module_utils.facts.sysctl import get_sysctl
+
+
+class NetBSDHardware(Hardware):
+ """
+ NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor (a list)
+ - processor_cores
+ - processor_count
+ - devices
+ - uptime_seconds
+ """
+ platform = 'NetBSD'
+ MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+ self.sysctl = get_sysctl(self.module, ['machdep'])
+ cpu_facts = self.get_cpu_facts()
+ memory_facts = self.get_memory_facts()
+
+ mount_facts = {}
+ try:
+ mount_facts = self.get_mount_facts()
+ except TimeoutError:
+ pass
+
+ dmi_facts = self.get_dmi_facts()
+ uptime_facts = self.get_uptime_facts()
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(mount_facts)
+ hardware_facts.update(dmi_facts)
+ hardware_facts.update(uptime_facts)
+
+ return hardware_facts
+
+ def get_cpu_facts(self):
+ cpu_facts = {}
+
+ i = 0
+ physid = 0
+ sockets = {}
+ if not os.access("/proc/cpuinfo", os.R_OK):
+ return cpu_facts
+ cpu_facts['processor'] = []
+ for line in get_file_lines("/proc/cpuinfo"):
+ data = line.split(":", 1)
+ key = data[0].strip()
+ # model name is for Intel arch, Processor (mind the uppercase P)
+ # works for some ARM devices, like the Sheevaplug.
+ if key == 'model name' or key == 'Processor':
+ if 'processor' not in cpu_facts:
+ cpu_facts['processor'] = []
+ cpu_facts['processor'].append(data[1].strip())
+ i += 1
+ elif key == 'physical id':
+ physid = data[1].strip()
+ if physid not in sockets:
+ sockets[physid] = 1
+ elif key == 'cpu cores':
+ sockets[physid] = int(data[1].strip())
+ if len(sockets) > 0:
+ cpu_facts['processor_count'] = len(sockets)
+ cpu_facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
+ else:
+ cpu_facts['processor_count'] = i
+ cpu_facts['processor_cores'] = 'NA'
+
+ return cpu_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+ if not os.access("/proc/meminfo", os.R_OK):
+ return memory_facts
+ for line in get_file_lines("/proc/meminfo"):
+ data = line.split(":", 1)
+ key = data[0]
+ if key in NetBSDHardware.MEMORY_FACTS:
+ val = data[1].strip().split(' ')[0]
+ memory_facts["%s_mb" % key.lower()] = int(val) // 1024
+
+ return memory_facts
+
+ @timeout()
+ def get_mount_facts(self):
+ mount_facts = {}
+
+ mount_facts['mounts'] = []
+ fstab = get_file_content('/etc/fstab')
+
+ if not fstab:
+ return mount_facts
+
+ for line in fstab.splitlines():
+ if line.startswith('#') or line.strip() == '':
+ continue
+ fields = re.sub(r'\s+', ' ', line).split()
+ mount_statvfs_info = get_mount_size(fields[1])
+ mount_info = {'mount': fields[1],
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[3]}
+ mount_info.update(mount_statvfs_info)
+ mount_facts['mounts'].append(mount_info)
+ return mount_facts
+
+ def get_dmi_facts(self):
+ dmi_facts = {}
+ # We don't use dmidecode(8) here because:
+ # - it would add dependency on an external package
+        # - dmidecode(8) can only be run as root
+ # So instead we rely on sysctl(8) to provide us the information on a
+ # best-effort basis. As a bonus we also get facts on non-amd64/i386
+ # platforms this way.
+ sysctl_to_dmi = {
+ 'machdep.dmi.system-product': 'product_name',
+ 'machdep.dmi.system-version': 'product_version',
+ 'machdep.dmi.system-uuid': 'product_uuid',
+ 'machdep.dmi.system-serial': 'product_serial',
+ 'machdep.dmi.system-vendor': 'system_vendor',
+ }
+
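+        # e.g. (illustrative): self.sysctl['machdep.dmi.system-vendor'] -> 'QEMU'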
+ for mib in sysctl_to_dmi:
+ if mib in self.sysctl:
+ dmi_facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
+
+ return dmi_facts
+
+ def get_uptime_facts(self):
+ # On NetBSD, we need to call sysctl with -n to get this value as an int.
+ sysctl_cmd = self.module.get_bin_path('sysctl')
+ cmd = [sysctl_cmd, '-n', 'kern.boottime']
+
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc != 0:
+ return {}
+
+ kern_boottime = out.strip()
+ if not kern_boottime.isdigit():
+ return {}
+
+ return {
+ 'uptime_seconds': int(time.time() - int(kern_boottime)),
+ }
+
+
+class NetBSDHardwareCollector(HardwareCollector):
+ _fact_class = NetBSDHardware
+ _platform = 'NetBSD'
diff --git a/lib/ansible/module_utils/facts/hardware/openbsd.py b/lib/ansible/module_utils/facts/hardware/openbsd.py
new file mode 100644
index 0000000..3bcf8ce
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/openbsd.py
@@ -0,0 +1,184 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import time
+
+from ansible.module_utils._text import to_text
+
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts import timeout
+
+from ansible.module_utils.facts.utils import get_file_content, get_mount_size
+from ansible.module_utils.facts.sysctl import get_sysctl
+
+
+class OpenBSDHardware(Hardware):
+ """
+ OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor (a list)
+ - processor_cores
+ - processor_count
+ - processor_speed
+ - uptime_seconds
+
+    In addition, it also defines a number of DMI facts and device facts.
+ """
+ platform = 'OpenBSD'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+ self.sysctl = get_sysctl(self.module, ['hw'])
+
+ hardware_facts.update(self.get_processor_facts())
+ hardware_facts.update(self.get_memory_facts())
+ hardware_facts.update(self.get_device_facts())
+ hardware_facts.update(self.get_dmi_facts())
+ hardware_facts.update(self.get_uptime_facts())
+
+        # storage devices are notoriously prone to hang/block, so they are under a timeout
+ try:
+ hardware_facts.update(self.get_mount_facts())
+ except timeout.TimeoutError:
+ pass
+
+ return hardware_facts
+
+ @timeout.timeout()
+ def get_mount_facts(self):
+ mount_facts = {}
+
+ mount_facts['mounts'] = []
+ fstab = get_file_content('/etc/fstab')
+ if fstab:
+ for line in fstab.splitlines():
+ if line.startswith('#') or line.strip() == '':
+ continue
+ fields = re.sub(r'\s+', ' ', line).split()
+ if fields[1] == 'none' or fields[3] == 'xx':
+ continue
+ mount_statvfs_info = get_mount_size(fields[1])
+ mount_info = {'mount': fields[1],
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[3]}
+ mount_info.update(mount_statvfs_info)
+ mount_facts['mounts'].append(mount_info)
+ return mount_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+ # Get free memory. vmstat output looks like:
+ # procs memory page disks traps cpu
+ # r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
+ # 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
+ rc, out, err = self.module.run_command("/usr/bin/vmstat")
+ if rc == 0:
+ memory_facts['memfree_mb'] = int(out.splitlines()[-1].split()[4]) // 1024
+ memory_facts['memtotal_mb'] = int(self.sysctl['hw.usermem']) // 1024 // 1024
+
+ # Get swapctl info. swapctl output looks like:
+ # total: 69268 1K-blocks allocated, 0 used, 69268 available
+ # And for older OpenBSD:
+ # total: 69268k bytes allocated = 0k used, 69268k available
+ rc, out, err = self.module.run_command("/sbin/swapctl -sk")
+ if rc == 0:
+ swaptrans = {ord(u'k'): None,
+ ord(u'm'): None,
+ ord(u'g'): None}
+ data = to_text(out, errors='surrogate_or_strict').split()
+ memory_facts['swapfree_mb'] = int(data[-2].translate(swaptrans)) // 1024
+ memory_facts['swaptotal_mb'] = int(data[1].translate(swaptrans)) // 1024
+
+ return memory_facts
+
+ def get_uptime_facts(self):
+        # On OpenBSD, we need to call sysctl with -n to get this value as an int.
+ sysctl_cmd = self.module.get_bin_path('sysctl')
+ cmd = [sysctl_cmd, '-n', 'kern.boottime']
+
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc != 0:
+ return {}
+
+ kern_boottime = out.strip()
+ if not kern_boottime.isdigit():
+ return {}
+
+ return {
+ 'uptime_seconds': int(time.time() - int(kern_boottime)),
+ }
+
+ def get_processor_facts(self):
+ cpu_facts = {}
+ processor = []
+ for i in range(int(self.sysctl['hw.ncpuonline'])):
+ processor.append(self.sysctl['hw.model'])
+
+ cpu_facts['processor'] = processor
+ # The following is partly a lie because there is no reliable way to
+ # determine the number of physical CPUs in the system. We can only
+ # query the number of logical CPUs, which hides the number of cores.
+ # On amd64/i386 we could try to inspect the smt/core/package lines in
+ # dmesg, however even those have proven to be unreliable.
+ # So take a shortcut and report the logical number of processors in
+ # 'processor_count' and 'processor_cores' and leave it at that.
+ cpu_facts['processor_count'] = self.sysctl['hw.ncpuonline']
+ cpu_facts['processor_cores'] = self.sysctl['hw.ncpuonline']
+
+ return cpu_facts
+
+ def get_device_facts(self):
+ device_facts = {}
+ devices = []
+ devices.extend(self.sysctl['hw.disknames'].split(','))
+ device_facts['devices'] = devices
+
+ return device_facts
+
+ def get_dmi_facts(self):
+ dmi_facts = {}
+ # We don't use dmidecode(8) here because:
+ # - it would add dependency on an external package
+ # - dmidecode(8) can only be ran as root
+ # So instead we rely on sysctl(8) to provide us the information on a
+ # best-effort basis. As a bonus we also get facts on non-amd64/i386
+ # platforms this way.
+ sysctl_to_dmi = {
+ 'hw.product': 'product_name',
+ 'hw.version': 'product_version',
+ 'hw.uuid': 'product_uuid',
+ 'hw.serialno': 'product_serial',
+ 'hw.vendor': 'system_vendor',
+ }
+
+ for mib in sysctl_to_dmi:
+ if mib in self.sysctl:
+ dmi_facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
+
+ return dmi_facts
+
+
+class OpenBSDHardwareCollector(HardwareCollector):
+ _fact_class = OpenBSDHardware
+ _platform = 'OpenBSD'
diff --git a/lib/ansible/module_utils/facts/hardware/sunos.py b/lib/ansible/module_utils/facts/hardware/sunos.py
new file mode 100644
index 0000000..0a77db0
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/sunos.py
@@ -0,0 +1,286 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import time
+
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.text.formatters import bytes_to_human
+from ansible.module_utils.facts.utils import get_file_content, get_mount_size
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts import timeout
+from ansible.module_utils.six.moves import reduce
+
+
+class SunOSHardware(Hardware):
+ """
+ In addition to the generic memory and cpu facts, this also sets
+    swap_reserved_mb and swap_allocated_mb that are available from *swap -s*.
+ """
+ platform = 'SunOS'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+
+ # FIXME: could pass to run_command(environ_update), but it also tweaks the env
+ # of the parent process instead of altering an env provided to Popen()
+ # Use C locale for hardware collection helpers to avoid locale specific number formatting (#24542)
+ locale = get_best_parsable_locale(self.module)
+ self.module.run_command_environ_update = {'LANG': locale, 'LC_ALL': locale, 'LC_NUMERIC': locale}
+
+ cpu_facts = self.get_cpu_facts()
+ memory_facts = self.get_memory_facts()
+ dmi_facts = self.get_dmi_facts()
+ device_facts = self.get_device_facts()
+ uptime_facts = self.get_uptime_facts()
+
+ mount_facts = {}
+ try:
+ mount_facts = self.get_mount_facts()
+ except timeout.TimeoutError:
+ pass
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(dmi_facts)
+ hardware_facts.update(device_facts)
+ hardware_facts.update(uptime_facts)
+ hardware_facts.update(mount_facts)
+
+ return hardware_facts
+
+ def get_cpu_facts(self, collected_facts=None):
+ physid = 0
+ sockets = {}
+
+ cpu_facts = {}
+ collected_facts = collected_facts or {}
+
+ rc, out, err = self.module.run_command("/usr/bin/kstat cpu_info")
+
+ cpu_facts['processor'] = []
+
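+        # Illustrative `kstat cpu_info` fragment (layout assumed; values
+        # hypothetical):
+        #   module: cpu_info                        instance: 0
+        #           brand                           Intel(r) Xeon(r) CPU E5-2680 v3 @ 2.50GHz
+        #           clock_MHz                       2500
+        #           chip_id                         0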
+ for line in out.splitlines():
+ if len(line) < 1:
+ continue
+
+ data = line.split(None, 1)
+ key = data[0].strip()
+
+ # "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
+ if key == 'module:':
+ brand = ''
+ elif key == 'brand':
+ brand = data[1].strip()
+ elif key == 'clock_MHz':
+ clock_mhz = data[1].strip()
+ elif key == 'implementation':
+ processor = brand or data[1].strip()
+ # Add clock speed to description for SPARC CPU
+ # FIXME
+ if collected_facts.get('ansible_machine') != 'i86pc':
+ processor += " @ " + clock_mhz + "MHz"
+ if 'ansible_processor' not in collected_facts:
+ cpu_facts['processor'] = []
+ cpu_facts['processor'].append(processor)
+ elif key == 'chip_id':
+ physid = data[1].strip()
+ if physid not in sockets:
+ sockets[physid] = 1
+ else:
+ sockets[physid] += 1
+
+ # Counting cores on Solaris can be complicated.
+ # https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
+ # Treat 'processor_count' as physical sockets and 'processor_cores' as
+        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC, as
+ # these processors have: sockets -> cores -> threads/virtual CPU.
+ if len(sockets) > 0:
+ cpu_facts['processor_count'] = len(sockets)
+ cpu_facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
+ else:
+ cpu_facts['processor_cores'] = 'NA'
+ cpu_facts['processor_count'] = len(cpu_facts['processor'])
+
+ return cpu_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+
+ rc, out, err = self.module.run_command(["/usr/sbin/prtconf"])
+
+ for line in out.splitlines():
+ if 'Memory size' in line:
+ memory_facts['memtotal_mb'] = int(line.split()[2])
+
+ rc, out, err = self.module.run_command("/usr/sbin/swap -s")
+
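+        # Sample `swap -s` output (layout assumed; the token indices below
+        # depend on it):
+        #   total: 123456k bytes allocated + 23456k reserved = 146912k used, 1048576k available
+        # Tokens [1], [5], [8] and [10] are allocated, reserved, used and available.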
+ allocated = int(out.split()[1][:-1])
+ reserved = int(out.split()[5][:-1])
+ used = int(out.split()[8][:-1])
+ free = int(out.split()[10][:-1])
+
+ memory_facts['swapfree_mb'] = free // 1024
+ memory_facts['swaptotal_mb'] = (free + used) // 1024
+ memory_facts['swap_allocated_mb'] = allocated // 1024
+ memory_facts['swap_reserved_mb'] = reserved // 1024
+
+ return memory_facts
+
+ @timeout.timeout()
+ def get_mount_facts(self):
+ mount_facts = {}
+ mount_facts['mounts'] = []
+
+ # For a detailed format description see mnttab(4)
+ # special mount_point fstype options time
+ fstab = get_file_content('/etc/mnttab')
+
+ if fstab:
+ for line in fstab.splitlines():
+ fields = line.split('\t')
+ mount_statvfs_info = get_mount_size(fields[1])
+ mount_info = {'mount': fields[1],
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[3],
+ 'time': fields[4]}
+ mount_info.update(mount_statvfs_info)
+ mount_facts['mounts'].append(mount_info)
+
+ return mount_facts
+
+ def get_dmi_facts(self):
+ dmi_facts = {}
+
+ # On Solaris 8 the prtdiag wrapper is absent from /usr/sbin,
+ # but that's okay, because we know where to find the real thing:
+ rc, platform, err = self.module.run_command('/usr/bin/uname -i')
+ platform_sbin = '/usr/platform/' + platform.rstrip() + '/sbin'
+
+ prtdiag_path = self.module.get_bin_path("prtdiag", opt_dirs=[platform_sbin])
+ rc, out, err = self.module.run_command(prtdiag_path)
+ """
+ rc returns 1
+ """
+ if out:
+ system_conf = out.split('\n')[0]
+
+ # If you know of any other manufacturers whose names appear in
+ # the first line of prtdiag's output, please add them here:
+ vendors = [
+ "Fujitsu",
+ "Oracle Corporation",
+ "QEMU",
+ "Sun Microsystems",
+ "VMware, Inc.",
+ ]
+ vendor_regexp = "|".join(map(re.escape, vendors))
+ system_conf_regexp = (r'System Configuration:\s+'
+ + r'(' + vendor_regexp + r')\s+'
+ + r'(?:sun\w+\s+)?'
+ + r'(.+)')
+
+ found = re.match(system_conf_regexp, system_conf)
+ if found:
+ dmi_facts['system_vendor'] = found.group(1)
+ dmi_facts['product_name'] = found.group(2)
+
+ return dmi_facts
+
+ def get_device_facts(self):
+        # Device facts are derived from sderr kstats. This code does not use the
+ # full output, but rather queries for specific stats.
+ # Example output:
+ # sderr:0:sd0,err:Hard Errors 0
+ # sderr:0:sd0,err:Illegal Request 6
+ # sderr:0:sd0,err:Media Error 0
+ # sderr:0:sd0,err:Predictive Failure Analysis 0
+ # sderr:0:sd0,err:Product VBOX HARDDISK 9
+ # sderr:0:sd0,err:Revision 1.0
+ # sderr:0:sd0,err:Serial No VB0ad2ec4d-074a
+ # sderr:0:sd0,err:Size 53687091200
+ # sderr:0:sd0,err:Soft Errors 0
+ # sderr:0:sd0,err:Transport Errors 0
+ # sderr:0:sd0,err:Vendor ATA
+
+ device_facts = {}
+ device_facts['devices'] = {}
+
+ disk_stats = {
+ 'Product': 'product',
+ 'Revision': 'revision',
+ 'Serial No': 'serial',
+ 'Size': 'size',
+ 'Vendor': 'vendor',
+ 'Hard Errors': 'hard_errors',
+ 'Soft Errors': 'soft_errors',
+ 'Transport Errors': 'transport_errors',
+ 'Media Error': 'media_errors',
+ 'Predictive Failure Analysis': 'predictive_failure_analysis',
+ 'Illegal Request': 'illegal_request',
+ }
+
+ cmd = ['/usr/bin/kstat', '-p']
+
+ for ds in disk_stats:
+ cmd.append('sderr:::%s' % ds)
+
+ d = {}
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ return device_facts
+
+ sd_instances = frozenset(line.split(':')[1] for line in out.split('\n') if line.startswith('sderr'))
+ for instance in sd_instances:
+ lines = (line for line in out.split('\n') if ':' in line and line.split(':')[1] == instance)
+ for line in lines:
+ text, value = line.split('\t')
+ stat = text.split(':')[3]
+
+ if stat == 'Size':
+ d[disk_stats.get(stat)] = bytes_to_human(float(value))
+ else:
+ d[disk_stats.get(stat)] = value.rstrip()
+
+ diskname = 'sd' + instance
+ device_facts['devices'][diskname] = d
+ d = {}
+
+ return device_facts
+
+ def get_uptime_facts(self):
+ uptime_facts = {}
+ # sample kstat output:
+ # unix:0:system_misc:boot_time 1548249689
+ rc, out, err = self.module.run_command('/usr/bin/kstat -p unix:0:system_misc:boot_time')
+
+ if rc != 0:
+ return
+
+ # uptime = $current_time - $boot_time
+ uptime_facts['uptime_seconds'] = int(time.time() - int(out.split('\t')[1]))
+
+ return uptime_facts
+
+
+class SunOSHardwareCollector(HardwareCollector):
+ _fact_class = SunOSHardware
+ _platform = 'SunOS'
+
+ required_facts = set(['platform'])
diff --git a/lib/ansible/module_utils/facts/namespace.py b/lib/ansible/module_utils/facts/namespace.py
new file mode 100644
index 0000000..2d6bf8a
--- /dev/null
+++ b/lib/ansible/module_utils/facts/namespace.py
@@ -0,0 +1,51 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class FactNamespace:
+ def __init__(self, namespace_name):
+ self.namespace_name = namespace_name
+
+ def transform(self, name):
+        '''Take a text name and transform it as needed (add a namespace prefix, etc.)'''
+ return name
+
+ def _underscore(self, name):
+ return name.replace('-', '_')
+
+
+class PrefixFactNamespace(FactNamespace):
+ def __init__(self, namespace_name, prefix=None):
+ super(PrefixFactNamespace, self).__init__(namespace_name)
+ self.prefix = prefix
+
+ def transform(self, name):
+ new_name = self._underscore(name)
+ return '%s%s' % (self.prefix, new_name)
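+
+
+# Usage sketch (illustrative): collectors use a namespace to prefix fact
+# names consistently, e.g.
+#   PrefixFactNamespace('ansible', prefix='ansible_').transform('memfree-mb')
+# returns 'ansible_memfree_mb'.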
diff --git a/lib/ansible/module_utils/facts/network/__init__.py b/lib/ansible/module_utils/facts/network/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/__init__.py
diff --git a/lib/ansible/module_utils/facts/network/aix.py b/lib/ansible/module_utils/facts/network/aix.py
new file mode 100644
index 0000000..e9c90c6
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/aix.py
@@ -0,0 +1,145 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class AIXNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the AIX Network Class.
+    It subclasses GenericBsdIfconfigNetwork, overriding the default-interface
+    and interface-info parsing to match AIX netstat/ifconfig output.
+ """
+ platform = 'AIX'
+
+ def get_default_interfaces(self, route_path):
+ interface = dict(v4={}, v6={})
+
+ netstat_path = self.module.get_bin_path('netstat')
+
+ if netstat_path:
+ rc, out, err = self.module.run_command([netstat_path, '-nr'])
+
+ lines = out.splitlines()
+ for line in lines:
+ words = line.split()
+ if len(words) > 1 and words[0] == 'default':
+ if '.' in words[1]:
+ interface['v4']['gateway'] = words[1]
+ interface['v4']['interface'] = words[5]
+ elif ':' in words[1]:
+ interface['v6']['gateway'] = words[1]
+ interface['v6']['interface'] = words[5]
+
+ return interface['v4'], interface['v6']
+
+ # AIX 'ifconfig -a' does not have three words in the interface line
+ def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
+ interfaces = {}
+ current_if = {}
+ ips = dict(
+ all_ipv4_addresses=[],
+ all_ipv6_addresses=[],
+ )
+
+ uname_rc = None
+ uname_out = None
+ uname_err = None
+ uname_path = self.module.get_bin_path('uname')
+ if uname_path:
+ uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W'])
+
+ rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
+
+ for line in out.splitlines():
+
+ if line:
+ words = line.split()
+
+ # only this condition differs from GenericBsdIfconfigNetwork
+ if re.match(r'^\w*\d*:', line):
+ current_if = self.parse_interface_line(words)
+ interfaces[current_if['device']] = current_if
+ elif words[0].startswith('options='):
+ self.parse_options_line(words, current_if, ips)
+ elif words[0] == 'nd6':
+ self.parse_nd6_line(words, current_if, ips)
+ elif words[0] == 'ether':
+ self.parse_ether_line(words, current_if, ips)
+ elif words[0] == 'media:':
+ self.parse_media_line(words, current_if, ips)
+ elif words[0] == 'status:':
+ self.parse_status_line(words, current_if, ips)
+ elif words[0] == 'lladdr':
+ self.parse_lladdr_line(words, current_if, ips)
+ elif words[0] == 'inet':
+ self.parse_inet_line(words, current_if, ips)
+ elif words[0] == 'inet6':
+ self.parse_inet6_line(words, current_if, ips)
+ else:
+ self.parse_unknown_line(words, current_if, ips)
+
+        # don't bother with WPARs, as this does not work inside them;
+        # a '0' from `uname -W` means we are not in a WPAR
+ if not uname_rc and uname_out.split()[0] == '0':
+
+ if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
+ entstat_path = self.module.get_bin_path('entstat')
+ if entstat_path:
+ rc, out, err = self.module.run_command([entstat_path, current_if['device']])
+ if rc != 0:
+ break
+ for line in out.splitlines():
+ if not line:
+                            continue
+ buff = re.match('^Hardware Address: (.*)', line)
+ if buff:
+ current_if['macaddress'] = buff.group(1)
+
+ buff = re.match('^Device Type:', line)
+ if buff and re.match('.*Ethernet', line):
+ current_if['type'] = 'ether'
+
+ # device must have mtu attribute in ODM
+ if 'mtu' not in current_if:
+ lsattr_path = self.module.get_bin_path('lsattr')
+ if lsattr_path:
+ rc, out, err = self.module.run_command([lsattr_path, '-El', current_if['device']])
+ if rc != 0:
+ break
+ for line in out.splitlines():
+ if line:
+ words = line.split()
+ if words[0] == 'mtu':
+ current_if['mtu'] = words[1]
+ return interfaces, ips
+
+    # AIX 'ifconfig -a' does not report the MTU, so current_if['mtu'] is not set here
+ def parse_interface_line(self, words):
+ device = words[0][0:-1]
+ current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
+ current_if['flags'] = self.get_options(words[1])
+ current_if['macaddress'] = 'unknown' # will be overwritten later
+ return current_if
+
+
+class AIXNetworkCollector(NetworkCollector):
+ _fact_class = AIXNetwork
+ _platform = 'AIX'
diff --git a/lib/ansible/module_utils/facts/network/base.py b/lib/ansible/module_utils/facts/network/base.py
new file mode 100644
index 0000000..8243f06
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/base.py
@@ -0,0 +1,72 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class Network:
+ """
+ This is a generic Network subclass of Facts. This should be further
+ subclassed to implement per platform. If you subclass this,
+ you must define:
+ - interfaces (a list of interface names)
+ - interface_<name> dictionary of ipv4, ipv6, and mac address information.
+
+ All subclasses MUST define platform.
+ """
+ platform = 'Generic'
+
+ # FIXME: remove load_on_init when we can
+ def __init__(self, module, load_on_init=False):
+ self.module = module
+
+ # TODO: more or less abstract/NotImplemented
+ def populate(self, collected_facts=None):
+ return {}
+
+
+class NetworkCollector(BaseFactCollector):
+ # MAYBE: we could try to build this based on the arch specific implementation of Network() or its kin
+ name = 'network'
+ _fact_class = Network
+ _fact_ids = set(['interfaces',
+ 'default_ipv4',
+ 'default_ipv6',
+ 'all_ipv4_addresses',
+ 'all_ipv6_addresses']) # type: t.Set[str]
+
+ IPV6_SCOPE = {'0': 'global',
+ '10': 'host',
+ '20': 'link',
+ '40': 'admin',
+ '50': 'site',
+ '80': 'organization'}
+
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+ if not module:
+ return {}
+
+ # Network munges cached_facts by side effect, so give it a copy
+ facts_obj = self._fact_class(module)
+
+ facts_dict = facts_obj.populate(collected_facts=collected_facts)
+
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/network/darwin.py b/lib/ansible/module_utils/facts/network/darwin.py
new file mode 100644
index 0000000..90117e5
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/darwin.py
@@ -0,0 +1,49 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class DarwinNetwork(GenericBsdIfconfigNetwork):
+ """
+    This is the macOS (Darwin) Network Class.
+    It uses GenericBsdIfconfigNetwork, overriding only the media-line parsing.
+ """
+ platform = 'Darwin'
+
+ # media line is different to the default FreeBSD one
+ def parse_media_line(self, words, current_if, ips):
+ # not sure if this is useful - we also drop information
+ current_if['media'] = 'Unknown' # Mac does not give us this
+ current_if['media_select'] = words[1]
+ if len(words) > 2:
+            # macOS sets the media to '<unknown type>' for bridge interfaces
+ # and parsing splits this into two words; this if/else helps
+ if words[1] == '<unknown' and words[2] == 'type>':
+ current_if['media_select'] = 'Unknown'
+ current_if['media_type'] = 'unknown type'
+ else:
+ current_if['media_type'] = words[2][1:-1]
+ if len(words) > 3:
+ current_if['media_options'] = self.get_options(words[3])
+
+
+class DarwinNetworkCollector(NetworkCollector):
+ _fact_class = DarwinNetwork
+ _platform = 'Darwin'
diff --git a/lib/ansible/module_utils/facts/network/dragonfly.py b/lib/ansible/module_utils/facts/network/dragonfly.py
new file mode 100644
index 0000000..e43bbb2
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/dragonfly.py
@@ -0,0 +1,33 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class DragonFlyNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the DragonFly Network Class.
+ It uses the GenericBsdIfconfigNetwork unchanged.
+ """
+ platform = 'DragonFly'
+
+
+class DragonFlyNetworkCollector(NetworkCollector):
+ _fact_class = DragonFlyNetwork
+ _platform = 'DragonFly'
diff --git a/lib/ansible/module_utils/facts/network/fc_wwn.py b/lib/ansible/module_utils/facts/network/fc_wwn.py
new file mode 100644
index 0000000..86182f8
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/fc_wwn.py
@@ -0,0 +1,111 @@
+# Fibre Channel WWN initiator related facts collection for ansible.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import glob
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.utils import get_file_lines
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class FcWwnInitiatorFactCollector(BaseFactCollector):
+ name = 'fibre_channel_wwn'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ """
+ Example contents /sys/class/fc_host/*/port_name:
+
+ 0x21000014ff52a9bb
+
+ """
+
+ fc_facts = {}
+ fc_facts['fibre_channel_wwn'] = []
+ if sys.platform.startswith('linux'):
+ for fcfile in glob.glob('/sys/class/fc_host/*/port_name'):
+ for line in get_file_lines(fcfile):
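+                    # strip the leading '0x', e.g. '0x21000014ff52a9bb' -> '21000014ff52a9bb'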
+ fc_facts['fibre_channel_wwn'].append(line.rstrip()[2:])
+ elif sys.platform.startswith('sunos'):
+ """
+ on solaris 10 or solaris 11 should use `fcinfo hba-port`
+ TBD (not implemented): on solaris 9 use `prtconf -pv`
+ """
+ cmd = module.get_bin_path('fcinfo')
+ if cmd:
+ cmd = cmd + " hba-port"
+ rc, fcinfo_out, err = module.run_command(cmd)
+ """
+ # fcinfo hba-port | grep "Port WWN"
+ HBA Port WWN: 10000090fa1658de
+ """
+ if rc == 0 and fcinfo_out:
+ for line in fcinfo_out.splitlines():
+ if 'Port WWN' in line:
+ data = line.split(' ')
+ fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
+ elif sys.platform.startswith('aix'):
+ cmd = module.get_bin_path('lsdev')
+ lscfg_cmd = module.get_bin_path('lscfg')
+ if cmd and lscfg_cmd:
+ # get list of available fibre-channel devices (fcs)
+ cmd = cmd + " -Cc adapter -l fcs*"
+ rc, lsdev_out, err = module.run_command(cmd)
+ if rc == 0 and lsdev_out:
+ for line in lsdev_out.splitlines():
+ # if device is available (not in defined state), get its WWN
+ if 'Available' in line:
+ data = line.split(' ')
+ cmd = lscfg_cmd + " -vl %s" % data[0]
+ rc, lscfg_out, err = module.run_command(cmd)
+ # example output
+ # lscfg -vpl fcs3 | grep "Network Address"
+ # Network Address.............10000090FA551509
+ if rc == 0 and lscfg_out:
+ for line in lscfg_out.splitlines():
+ if 'Network Address' in line:
+ data = line.split('.')
+ fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
+ elif sys.platform.startswith('hp-ux'):
+ cmd = module.get_bin_path('ioscan')
+ fcmsu_cmd = module.get_bin_path('fcmsutil', opt_dirs=['/opt/fcms/bin'])
+ # go ahead if we have both commands available
+ if cmd and fcmsu_cmd:
+ # ioscan / get list of available fibre-channel devices (fcd)
+ cmd = cmd + " -fnC FC"
+ rc, ioscan_out, err = module.run_command(cmd)
+ if rc == 0 and ioscan_out:
+ for line in ioscan_out.splitlines():
+ line = line.strip()
+ if '/dev/fcd' in line:
+ dev = line.split(' ')
+ # get device information
+ cmd = fcmsu_cmd + " %s" % dev[0]
+ rc, fcmsutil_out, err = module.run_command(cmd)
+ # lookup the following line
+ # N_Port Port World Wide Name = 0x50060b00006975ec
+ if rc == 0 and fcmsutil_out:
+ for line in fcmsutil_out.splitlines():
+ if 'N_Port Port World Wide Name' in line:
+ data = line.split('=')
+ fc_facts['fibre_channel_wwn'].append(data[-1].strip())
+ return fc_facts
diff --git a/lib/ansible/module_utils/facts/network/freebsd.py b/lib/ansible/module_utils/facts/network/freebsd.py
new file mode 100644
index 0000000..36f6eec
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/freebsd.py
@@ -0,0 +1,33 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class FreeBSDNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the FreeBSD Network Class.
+ It uses the GenericBsdIfconfigNetwork unchanged.
+ """
+ platform = 'FreeBSD'
+
+
+class FreeBSDNetworkCollector(NetworkCollector):
+ _fact_class = FreeBSDNetwork
+ _platform = 'FreeBSD'
diff --git a/lib/ansible/module_utils/facts/network/generic_bsd.py b/lib/ansible/module_utils/facts/network/generic_bsd.py
new file mode 100644
index 0000000..8d640f2
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/generic_bsd.py
@@ -0,0 +1,321 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import socket
+import struct
+
+from ansible.module_utils.facts.network.base import Network
+
+
+class GenericBsdIfconfigNetwork(Network):
+ """
+ This is a generic BSD subclass of Network using the ifconfig command.
+ It defines
+ - interfaces (a list of interface names)
+ - interface_<name> dictionary of ipv4, ipv6, and mac address information.
+ - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
+ """
+ platform = 'Generic_BSD_Ifconfig'
+
+ def populate(self, collected_facts=None):
+ network_facts = {}
+ ifconfig_path = self.module.get_bin_path('ifconfig')
+
+ if ifconfig_path is None:
+ return network_facts
+
+ route_path = self.module.get_bin_path('route')
+
+ if route_path is None:
+ return network_facts
+
+ default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
+ interfaces, ips = self.get_interfaces_info(ifconfig_path)
+ interfaces = self.detect_type_media(interfaces)
+
+ self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
+ self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
+ network_facts['interfaces'] = sorted(list(interfaces.keys()))
+
+ for iface in interfaces:
+ network_facts[iface] = interfaces[iface]
+
+ network_facts['default_ipv4'] = default_ipv4
+ network_facts['default_ipv6'] = default_ipv6
+ network_facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
+ network_facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
+
+ return network_facts
+
+ def detect_type_media(self, interfaces):
+ for iface in interfaces:
+ if 'media' in interfaces[iface]:
+ if 'ether' in interfaces[iface]['media'].lower():
+ interfaces[iface]['type'] = 'ether'
+ return interfaces
+
+ def get_default_interfaces(self, route_path):
+
+ # Use the commands:
+ # route -n get default
+ # route -n get -inet6 default
+ # to find out the default outgoing interface, address, and gateway
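+        # Illustrative output fragment (split on ': ' below):
+        #      gateway: 192.168.1.1
+        #    interface: em0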
+
+ command = dict(v4=[route_path, '-n', 'get', 'default'],
+ v6=[route_path, '-n', 'get', '-inet6', 'default'])
+
+ interface = dict(v4={}, v6={})
+
+ for v in 'v4', 'v6':
+
+ if v == 'v6' and not socket.has_ipv6:
+ continue
+ rc, out, err = self.module.run_command(command[v])
+ if not out:
+ # v6 routing may result in
+ # RTNETLINK answers: Invalid argument
+ continue
+ for line in out.splitlines():
+ words = line.strip().split(': ')
+ # Collect output from route command
+ if len(words) > 1:
+ if words[0] == 'interface':
+ interface[v]['interface'] = words[1]
+ if words[0] == 'gateway':
+ interface[v]['gateway'] = words[1]
+ # help pick the right interface address on OpenBSD
+ if words[0] == 'if address':
+ interface[v]['address'] = words[1]
+ # help pick the right interface address on NetBSD
+ if words[0] == 'local addr':
+ interface[v]['address'] = words[1]
+
+ return interface['v4'], interface['v6']
+
+ def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
+ interfaces = {}
+ current_if = {}
+ ips = dict(
+ all_ipv4_addresses=[],
+ all_ipv6_addresses=[],
+ )
+ # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and macOS all implicitly add '-a'
+ # when running the command 'ifconfig'.
+ # Solaris must explicitly run the command 'ifconfig -a'.
+ rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
+
+ for line in out.splitlines():
+
+ if line:
+ words = line.split()
+
+ if words[0] == 'pass':
+ continue
+ elif re.match(r'^\S', line) and len(words) > 3:
+ current_if = self.parse_interface_line(words)
+ interfaces[current_if['device']] = current_if
+ elif words[0].startswith('options='):
+ self.parse_options_line(words, current_if, ips)
+ elif words[0] == 'nd6':
+ self.parse_nd6_line(words, current_if, ips)
+ elif words[0] == 'ether':
+ self.parse_ether_line(words, current_if, ips)
+ elif words[0] == 'media:':
+ self.parse_media_line(words, current_if, ips)
+ elif words[0] == 'status:':
+ self.parse_status_line(words, current_if, ips)
+ elif words[0] == 'lladdr':
+ self.parse_lladdr_line(words, current_if, ips)
+ elif words[0] == 'inet':
+ self.parse_inet_line(words, current_if, ips)
+ elif words[0] == 'inet6':
+ self.parse_inet6_line(words, current_if, ips)
+ elif words[0] == 'tunnel':
+ self.parse_tunnel_line(words, current_if, ips)
+ else:
+ self.parse_unknown_line(words, current_if, ips)
+
+ return interfaces, ips
+
+ def parse_interface_line(self, words):
+ device = words[0][0:-1]
+ current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
+ current_if['flags'] = self.get_options(words[1])
+ if 'LOOPBACK' in current_if['flags']:
+ current_if['type'] = 'loopback'
+ current_if['macaddress'] = 'unknown' # will be overwritten later
+
+ if len(words) >= 5: # Newer FreeBSD versions
+ current_if['metric'] = words[3]
+ current_if['mtu'] = words[5]
+ else:
+ current_if['mtu'] = words[3]
+
+ return current_if
+
+ def parse_options_line(self, words, current_if, ips):
+ # Mac has options like this...
+ current_if['options'] = self.get_options(words[0])
+
+ def parse_nd6_line(self, words, current_if, ips):
+ # FreeBSD has options like this...
+ current_if['options'] = self.get_options(words[1])
+
+ def parse_ether_line(self, words, current_if, ips):
+ current_if['macaddress'] = words[1]
+ current_if['type'] = 'ether'
+
+ def parse_media_line(self, words, current_if, ips):
+ # not sure if this is useful - we also drop information
+ current_if['media'] = words[1]
+ if len(words) > 2:
+ current_if['media_select'] = words[2]
+ if len(words) > 3:
+ current_if['media_type'] = words[3][1:]
+ if len(words) > 4:
+ current_if['media_options'] = self.get_options(words[4])
+
+ def parse_status_line(self, words, current_if, ips):
+ current_if['status'] = words[1]
+
+ def parse_lladdr_line(self, words, current_if, ips):
+ current_if['lladdr'] = words[1]
+
+ def parse_inet_line(self, words, current_if, ips):
+ # netbsd show aliases like this
+ # lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 33184
+ # inet 127.0.0.1 netmask 0xff000000
+ # inet alias 127.1.1.1 netmask 0xff000000
+ if words[1] == 'alias':
+ del words[1]
+
+ address = {'address': words[1]}
+ # cidr style ip address (eg, 127.0.0.1/24) in inet line
+ # used in netbsd ifconfig -e output after 7.1
+ if '/' in address['address']:
+ ip_address, cidr_mask = address['address'].split('/')
+
+ address['address'] = ip_address
+
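+            # Worked example (illustrative): a /24 prefix gives
+            #   (1 << 32) - (1 << 32 >> 24) = 0xffffff00,
+            # which inet_ntoa() renders as '255.255.255.0'.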
+ netmask_length = int(cidr_mask)
+ netmask_bin = (1 << 32) - (1 << 32 >> int(netmask_length))
+ address['netmask'] = socket.inet_ntoa(struct.pack('!L', netmask_bin))
+
+ if len(words) > 5:
+ address['broadcast'] = words[3]
+
+ else:
+            # Don't just assume fixed columns: locate the 'netmask' keyword and use the column that follows it
+ try:
+ netmask_idx = words.index('netmask') + 1
+ except ValueError:
+ netmask_idx = 3
+
+ # deal with hex netmask
+ if re.match('([0-9a-f]){8}$', words[netmask_idx]):
+ netmask = '0x' + words[netmask_idx]
+ else:
+ netmask = words[netmask_idx]
+
+ if netmask.startswith('0x'):
+ address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(netmask, base=16)))
+ else:
+ # otherwise assume this is a dotted quad
+ address['netmask'] = netmask
+ # calculate the network
+ address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
+ netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
+ address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
+ if 'broadcast' not in address:
+ # broadcast may be given or we need to calculate
+ try:
+ broadcast_idx = words.index('broadcast') + 1
+ except ValueError:
+ address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
+ else:
+ address['broadcast'] = words[broadcast_idx]
+
+ # add to our list of addresses
+ if not words[1].startswith('127.'):
+ ips['all_ipv4_addresses'].append(address['address'])
+ current_if['ipv4'].append(address)
+
+ def parse_inet6_line(self, words, current_if, ips):
+ address = {'address': words[1]}
+
+ # using cidr style addresses, ala NetBSD ifconfig post 7.1
+ if '/' in address['address']:
+ ip_address, cidr_mask = address['address'].split('/')
+
+ address['address'] = ip_address
+ address['prefix'] = cidr_mask
+
+ if len(words) > 5:
+ address['scope'] = words[5]
+ else:
+ if (len(words) >= 4) and (words[2] == 'prefixlen'):
+ address['prefix'] = words[3]
+ if (len(words) >= 6) and (words[4] == 'scopeid'):
+ address['scope'] = words[5]
+
+ localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
+ if address['address'] not in localhost6:
+ ips['all_ipv6_addresses'].append(address['address'])
+ current_if['ipv6'].append(address)
+
+ def parse_tunnel_line(self, words, current_if, ips):
+ current_if['type'] = 'tunnel'
+
+ def parse_unknown_line(self, words, current_if, ips):
+ # we are going to ignore unknown lines here - this may be
+ # a bad idea - but you can override it in your subclass
+ pass
+
+ # TODO: these are module scope static function candidates
+ # (most of the class is really...)
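+    # Example (illustrative): get_options('flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST>')
+    # returns ['UP', 'BROADCAST', 'RUNNING', 'SIMPLEX', 'MULTICAST'].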
+ def get_options(self, option_string):
+ start = option_string.find('<') + 1
+ end = option_string.rfind('>')
+ if (start > 0) and (end > 0) and (end > start + 1):
+ option_csv = option_string[start:end]
+ return option_csv.split(',')
+ else:
+ return []
+
+ def merge_default_interface(self, defaults, interfaces, ip_type):
+ if 'interface' not in defaults:
+ return
+ if not defaults['interface'] in interfaces:
+ return
+ ifinfo = interfaces[defaults['interface']]
+ # copy all the interface values across except addresses
+ for item in ifinfo:
+ if item != 'ipv4' and item != 'ipv6':
+ defaults[item] = ifinfo[item]
+
+ ipinfo = []
+ if 'address' in defaults:
+ ipinfo = [x for x in ifinfo[ip_type] if x['address'] == defaults['address']]
+
+ if len(ipinfo) == 0:
+ ipinfo = ifinfo[ip_type]
+
+ if len(ipinfo) > 0:
+ for item in ipinfo[0]:
+ defaults[item] = ipinfo[0][item]
diff --git a/lib/ansible/module_utils/facts/network/hpux.py b/lib/ansible/module_utils/facts/network/hpux.py
new file mode 100644
index 0000000..add57be
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/hpux.py
@@ -0,0 +1,82 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import Network, NetworkCollector
+
+
+class HPUXNetwork(Network):
+ """
+    HP-UX-specific subclass of Network. Defines networking facts:
+ - default_interface
+ - interfaces (a list of interface names)
+ - interface_<name> dictionary of ipv4 address information.
+ """
+ platform = 'HP-UX'
+
+ def populate(self, collected_facts=None):
+ network_facts = {}
+ netstat_path = self.module.get_bin_path('netstat')
+
+ if netstat_path is None:
+ return network_facts
+
+ default_interfaces_facts = self.get_default_interfaces()
+ network_facts.update(default_interfaces_facts)
+
+ interfaces = self.get_interfaces_info()
+ network_facts['interfaces'] = interfaces.keys()
+ for iface in interfaces:
+ network_facts[iface] = interfaces[iface]
+
+ return network_facts
+
+ def get_default_interfaces(self):
+ default_interfaces = {}
+ rc, out, err = self.module.run_command("/usr/bin/netstat -nr")
+ lines = out.splitlines()
+ for line in lines:
+ words = line.split()
+ if len(words) > 1:
+ if words[0] == 'default':
+ default_interfaces['default_interface'] = words[4]
+ default_interfaces['default_gateway'] = words[1]
+
+ return default_interfaces
+
+ def get_interfaces_info(self):
+ interfaces = {}
+ rc, out, err = self.module.run_command("/usr/bin/netstat -niw")
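+        # Illustrative `netstat -niw` line (column layout assumed:
+        # Name Mtu Network Address ...):
+        #   lan0 1500 192.168.1.0 192.168.1.10 ...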
+ lines = out.splitlines()
+ for line in lines:
+ words = line.split()
+ for i in range(len(words) - 1):
+ if words[i][:3] == 'lan':
+ device = words[i]
+ interfaces[device] = {'device': device}
+                    address = words[i + 3]
+                    network = words[i + 2]
+ interfaces[device]['ipv4'] = {'network': network,
+ 'interface': device,
+ 'address': address}
+ return interfaces
+
+
+class HPUXNetworkCollector(NetworkCollector):
+ _fact_class = HPUXNetwork
+ _platform = 'HP-UX'
diff --git a/lib/ansible/module_utils/facts/network/hurd.py b/lib/ansible/module_utils/facts/network/hurd.py
new file mode 100644
index 0000000..518df39
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/hurd.py
@@ -0,0 +1,87 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.facts.network.base import Network, NetworkCollector
+
+
+class HurdPfinetNetwork(Network):
+ """
+    This is a GNU Hurd-specific subclass of Network. It uses fsysopts to
+    get the IP address and supports only pfinet.
+ """
+ platform = 'GNU'
+ _socket_dir = '/servers/socket/'
+
+ def assign_network_facts(self, network_facts, fsysopts_path, socket_path):
+ rc, out, err = self.module.run_command([fsysopts_path, '-L', socket_path])
+        # FIXME: build up an interfaces data structure, then assign into network_facts
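+        # Illustrative `fsysopts -L` output (layout assumed; parsed token by
+        # token below):
+        #   /hurd/pfinet --interface=/dev/eth0 --address=192.168.1.5
+        #     --netmask=255.255.255.0 --gateway=192.168.1.1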
+ network_facts['interfaces'] = []
+ for i in out.split():
+ if '=' in i and i.startswith('--'):
+ k, v = i.split('=', 1)
+ # remove '--'
+ k = k[2:]
+ if k == 'interface':
+ # remove /dev/ from /dev/eth0
+ v = v[5:]
+ network_facts['interfaces'].append(v)
+ network_facts[v] = {
+ 'active': True,
+ 'device': v,
+ 'ipv4': {},
+ 'ipv6': [],
+ }
+ current_if = v
+ elif k == 'address':
+ network_facts[current_if]['ipv4']['address'] = v
+ elif k == 'netmask':
+ network_facts[current_if]['ipv4']['netmask'] = v
+ elif k == 'address6':
+ address, prefix = v.split('/')
+ network_facts[current_if]['ipv6'].append({
+ 'address': address,
+ 'prefix': prefix,
+ })
+ return network_facts
+
+ def populate(self, collected_facts=None):
+ network_facts = {}
+
+ fsysopts_path = self.module.get_bin_path('fsysopts')
+ if fsysopts_path is None:
+ return network_facts
+
+ socket_path = None
+
+ for l in ('inet', 'inet6'):
+ link = os.path.join(self._socket_dir, l)
+ if os.path.exists(link):
+ socket_path = link
+ break
+
+ if socket_path is None:
+ return network_facts
+
+ return self.assign_network_facts(network_facts, fsysopts_path, socket_path)
+
+
+class HurdNetworkCollector(NetworkCollector):
+ _platform = 'GNU'
+ _fact_class = HurdPfinetNetwork
diff --git a/lib/ansible/module_utils/facts/network/iscsi.py b/lib/ansible/module_utils/facts/network/iscsi.py
new file mode 100644
index 0000000..2bb9383
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/iscsi.py
@@ -0,0 +1,115 @@
+# iSCSI initiator related facts collection for Ansible.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import subprocess
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.facts.network.base import NetworkCollector
+
+
+class IscsiInitiatorNetworkCollector(NetworkCollector):
+ name = 'iscsi'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ """
+ Example of contents of /etc/iscsi/initiatorname.iscsi:
+
+ ## DO NOT EDIT OR REMOVE THIS FILE!
+ ## If you remove this file, the iSCSI daemon will not start.
+ ## If you change the InitiatorName, existing access control lists
+ ## may reject this initiator. The InitiatorName must be unique
+ ## for each iSCSI initiator. Do NOT duplicate iSCSI InitiatorNames.
+ InitiatorName=iqn.1993-08.org.debian:01:44a42c8ddb8b
+
+ Example of output from the AIX lsattr command:
+
+ # lsattr -E -l iscsi0
+ disc_filename /etc/iscsi/targets Configuration file False
+ disc_policy file Discovery Policy True
+ initiator_name iqn.localhost.hostid.7f000002 iSCSI Initiator Name True
+ isns_srvnames auto iSNS Servers IP Addresses True
+ isns_srvports iSNS Servers Port Numbers True
+ max_targets 16 Maximum Targets Allowed True
+ num_cmd_elems 200 Maximum number of commands to queue to driver True
+
+ Example of output from the HP-UX iscsiutil command:
+
+ #iscsiutil -l
+ Initiator Name : iqn.1986-03.com.hp:mcel_VMhost3.1f355cf6-e2db-11e0-a999-b44c0aef5537
+ Initiator Alias :
+
+ Authentication Method : None
+ CHAP Method : CHAP_UNI
+ Initiator CHAP Name :
+ CHAP Secret :
+ NAS Hostname :
+ NAS Secret :
+ Radius Server Hostname :
+ Header Digest : None, CRC32C (default)
+ Data Digest : None, CRC32C (default)
+ SLP Scope list for iSLPD :
+ """
+
+ iscsi_facts = {}
+ iscsi_facts['iscsi_iqn'] = ""
+ if sys.platform.startswith('linux') or sys.platform.startswith('sunos'):
+ for line in get_file_content('/etc/iscsi/initiatorname.iscsi', '').splitlines():
+ if line.startswith('#') or line.startswith(';') or line.strip() == '':
+ continue
+ if line.startswith('InitiatorName='):
+ iscsi_facts['iscsi_iqn'] = line.split('=', 1)[1]
+ break
+ elif sys.platform.startswith('aix'):
+ try:
+ cmd = get_bin_path('lsattr')
+ except ValueError:
+ return iscsi_facts
+
+ cmd += " -E -l iscsi0"
+ rc, out, err = module.run_command(cmd)
+ if rc == 0 and out:
+ line = self.findstr(out, 'initiator_name')
+ iscsi_facts['iscsi_iqn'] = line.split()[1].rstrip()
+
+ elif sys.platform.startswith('hp-ux'):
+ # try to find it in the default PATH and opt_dirs
+ try:
+ cmd = get_bin_path('iscsiutil', opt_dirs=['/opt/iscsi/bin'])
+ except ValueError:
+ return iscsi_facts
+
+ cmd += " -l"
+ rc, out, err = module.run_command(cmd)
+ if out:
+ line = self.findstr(out, 'Initiator Name')
+ iscsi_facts['iscsi_iqn'] = line.split(":", 1)[1].rstrip()
+
+ return iscsi_facts
+
+ def findstr(self, text, match):
+        # return the last line containing `match`; default to an empty string
+        # so that a missing match does not raise UnboundLocalError
+        found = ''
+        for line in text.splitlines():
+            if match in line:
+                found = line
+        return found
diff --git a/lib/ansible/module_utils/facts/network/linux.py b/lib/ansible/module_utils/facts/network/linux.py
new file mode 100644
index 0000000..b7ae976
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/linux.py
@@ -0,0 +1,327 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import glob
+import os
+import re
+import socket
+import struct
+
+from ansible.module_utils.facts.network.base import Network, NetworkCollector
+
+from ansible.module_utils.facts.utils import get_file_content
+
+
+class LinuxNetwork(Network):
+ """
+ This is a Linux-specific subclass of Network. It defines
+ - interfaces (a list of interface names)
+ - interface_<name> dictionary of ipv4, ipv6, and mac address information.
+ - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
+ - ipv4_address and ipv6_address: the first non-local address for each family.
+ """
+ platform = 'Linux'
+ INTERFACE_TYPE = {
+ '1': 'ether',
+ '32': 'infiniband',
+ '512': 'ppp',
+ '772': 'loopback',
+ '65534': 'tunnel',
+ }
+
+ def populate(self, collected_facts=None):
+ network_facts = {}
+ ip_path = self.module.get_bin_path('ip')
+ if ip_path is None:
+ return network_facts
+ default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path,
+ collected_facts=collected_facts)
+ interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
+ network_facts['interfaces'] = interfaces.keys()
+ for iface in interfaces:
+ network_facts[iface] = interfaces[iface]
+ network_facts['default_ipv4'] = default_ipv4
+ network_facts['default_ipv6'] = default_ipv6
+ network_facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
+ network_facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
+ return network_facts
+
+ def get_default_interfaces(self, ip_path, collected_facts=None):
+ collected_facts = collected_facts or {}
+ # Use the commands:
+ # ip -4 route get 8.8.8.8 -> Google public DNS
+ # ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
+ # to find out the default outgoing interface, address, and gateway
+ command = dict(
+ v4=[ip_path, '-4', 'route', 'get', '8.8.8.8'],
+ v6=[ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
+ )
+ interface = dict(v4={}, v6={})
+
+ for v in 'v4', 'v6':
+ if (v == 'v6' and collected_facts.get('ansible_os_family') == 'RedHat' and
+ collected_facts.get('ansible_distribution_version', '').startswith('4.')):
+ continue
+ if v == 'v6' and not socket.has_ipv6:
+ continue
+ rc, out, err = self.module.run_command(command[v], errors='surrogate_then_replace')
+ if not out:
+ # v6 routing may result in
+ # RTNETLINK answers: Invalid argument
+ continue
+ words = out.splitlines()[0].split()
+ # A valid output starts with the queried address on the first line
+ if len(words) > 0 and words[0] == command[v][-1]:
+ for i in range(len(words) - 1):
+ if words[i] == 'dev':
+ interface[v]['interface'] = words[i + 1]
+ elif words[i] == 'src':
+ interface[v]['address'] = words[i + 1]
+ elif words[i] == 'via' and words[i + 1] != command[v][-1]:
+ interface[v]['gateway'] = words[i + 1]
+ return interface['v4'], interface['v6']
+
+ def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
+ interfaces = {}
+ ips = dict(
+ all_ipv4_addresses=[],
+ all_ipv6_addresses=[],
+ )
+
+ # FIXME: maybe split into smaller methods?
+ # FIXME: this is pretty much a constructor
+
+ for path in glob.glob('/sys/class/net/*'):
+ if not os.path.isdir(path):
+ continue
+ device = os.path.basename(path)
+ interfaces[device] = {'device': device}
+ if os.path.exists(os.path.join(path, 'address')):
+ macaddress = get_file_content(os.path.join(path, 'address'), default='')
+ if macaddress and macaddress != '00:00:00:00:00:00':
+ interfaces[device]['macaddress'] = macaddress
+ if os.path.exists(os.path.join(path, 'mtu')):
+ interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
+ if os.path.exists(os.path.join(path, 'operstate')):
+ interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
+ if os.path.exists(os.path.join(path, 'device', 'driver', 'module')):
+ interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
+ if os.path.exists(os.path.join(path, 'type')):
+ _type = get_file_content(os.path.join(path, 'type'))
+ interfaces[device]['type'] = self.INTERFACE_TYPE.get(_type, 'unknown')
+ if os.path.exists(os.path.join(path, 'bridge')):
+ interfaces[device]['type'] = 'bridge'
+ interfaces[device]['interfaces'] = [os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*'))]
+ if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
+ interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
+ if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
+ interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
+ if os.path.exists(os.path.join(path, 'bonding')):
+ interfaces[device]['type'] = 'bonding'
+ interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
+ interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
+ interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
+ interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
+ primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
+ if primary:
+ interfaces[device]['primary'] = primary
+ path = os.path.join(path, 'bonding', 'all_slaves_active')
+ if os.path.exists(path):
+ interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
+ if os.path.exists(os.path.join(path, 'bonding_slave')):
+ interfaces[device]['perm_macaddress'] = get_file_content(os.path.join(path, 'bonding_slave', 'perm_hwaddr'), default='')
+ if os.path.exists(os.path.join(path, 'device')):
+ interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path, 'device')))
+ if os.path.exists(os.path.join(path, 'speed')):
+ speed = get_file_content(os.path.join(path, 'speed'))
+ if speed is not None:
+ interfaces[device]['speed'] = int(speed)
+
+ # Check whether an interface is in promiscuous mode
+ if os.path.exists(os.path.join(path, 'flags')):
+ promisc_mode = False
+                # Bit 0x0100 (IFF_PROMISC) of the interface flags indicates
+                # whether the interface is in promiscuous mode.
+ data = int(get_file_content(os.path.join(path, 'flags')), 16)
+ promisc_mode = (data & 0x0100 > 0)
+ interfaces[device]['promisc'] = promisc_mode
+
+ # TODO: determine if this needs to be in a nested scope/closure
+ def parse_ip_output(output, secondary=False):
+ for line in output.splitlines():
+ if not line:
+ continue
+ words = line.split()
+ broadcast = ''
+ if words[0] == 'inet':
+ if '/' in words[1]:
+ address, netmask_length = words[1].split('/')
+ if len(words) > 3:
+ if words[2] == 'brd':
+ broadcast = words[3]
+ else:
+ # pointopoint interfaces do not have a prefix
+ address = words[1]
+ netmask_length = "32"
+ address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
+ netmask_bin = (1 << 32) - (1 << 32 >> int(netmask_length))
+ netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
+ network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
+ iface = words[-1]
+ # NOTE: device is ref to outside scope
+ # NOTE: interfaces is also ref to outside scope
+ if iface != device:
+ interfaces[iface] = {}
+ if not secondary and "ipv4" not in interfaces[iface]:
+ interfaces[iface]['ipv4'] = {'address': address,
+ 'broadcast': broadcast,
+ 'netmask': netmask,
+ 'network': network,
+ 'prefix': netmask_length,
+ }
+ else:
+ if "ipv4_secondaries" not in interfaces[iface]:
+ interfaces[iface]["ipv4_secondaries"] = []
+ interfaces[iface]["ipv4_secondaries"].append({
+ 'address': address,
+ 'broadcast': broadcast,
+ 'netmask': netmask,
+ 'network': network,
+ 'prefix': netmask_length,
+ })
+
+ # add this secondary IP to the main device
+ if secondary:
+ if "ipv4_secondaries" not in interfaces[device]:
+ interfaces[device]["ipv4_secondaries"] = []
+ if device != iface:
+ interfaces[device]["ipv4_secondaries"].append({
+ 'address': address,
+ 'broadcast': broadcast,
+ 'netmask': netmask,
+ 'network': network,
+ 'prefix': netmask_length,
+ })
+
+ # NOTE: default_ipv4 is ref to outside scope
+ # If this is the default address, update default_ipv4
+ if 'address' in default_ipv4 and default_ipv4['address'] == address:
+ default_ipv4['broadcast'] = broadcast
+ default_ipv4['netmask'] = netmask
+ default_ipv4['network'] = network
+ default_ipv4['prefix'] = netmask_length
+ # NOTE: macaddress is ref from outside scope
+ default_ipv4['macaddress'] = macaddress
+ default_ipv4['mtu'] = interfaces[device]['mtu']
+ default_ipv4['type'] = interfaces[device].get("type", "unknown")
+ default_ipv4['alias'] = words[-1]
+ if not address.startswith('127.'):
+ ips['all_ipv4_addresses'].append(address)
+ elif words[0] == 'inet6':
+ if 'peer' == words[2]:
+ address = words[1]
+ _, prefix = words[3].split('/')
+ scope = words[5]
+ else:
+ address, prefix = words[1].split('/')
+ scope = words[3]
+ if 'ipv6' not in interfaces[device]:
+ interfaces[device]['ipv6'] = []
+ interfaces[device]['ipv6'].append({
+ 'address': address,
+ 'prefix': prefix,
+ 'scope': scope
+ })
+ # If this is the default address, update default_ipv6
+ if 'address' in default_ipv6 and default_ipv6['address'] == address:
+ default_ipv6['prefix'] = prefix
+ default_ipv6['scope'] = scope
+ default_ipv6['macaddress'] = macaddress
+ default_ipv6['mtu'] = interfaces[device]['mtu']
+ default_ipv6['type'] = interfaces[device].get("type", "unknown")
+ if not address == '::1':
+ ips['all_ipv6_addresses'].append(address)
+
+ ip_path = self.module.get_bin_path("ip")
+
+ args = [ip_path, 'addr', 'show', 'primary', 'dev', device]
+ rc, primary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
+ if rc == 0:
+ parse_ip_output(primary_data)
+ else:
+ # possibly busybox, fallback to running without the "primary" arg
+ # https://github.com/ansible/ansible/issues/50871
+ args = [ip_path, 'addr', 'show', 'dev', device]
+ rc, data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
+ if rc == 0:
+ parse_ip_output(data)
+
+ args = [ip_path, 'addr', 'show', 'secondary', 'dev', device]
+ rc, secondary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
+ if rc == 0:
+ parse_ip_output(secondary_data, secondary=True)
+
+ interfaces[device].update(self.get_ethtool_data(device))
+
+        # replace ':' with '_' in interface names, since colons are hard to use in templates
+ new_interfaces = {}
+ # i is a dict key (string) not an index int
+ for i in interfaces:
+ if ':' in i:
+ new_interfaces[i.replace(':', '_')] = interfaces[i]
+ else:
+ new_interfaces[i] = interfaces[i]
+ return new_interfaces, ips
+
+ def get_ethtool_data(self, device):
+
+ data = {}
+ ethtool_path = self.module.get_bin_path("ethtool")
+ # FIXME: exit early on falsey ethtool_path and un-indent
+ if ethtool_path:
+ args = [ethtool_path, '-k', device]
+ rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace')
+ # FIXME: exit early on falsey if we can
+ if rc == 0:
+ features = {}
+ for line in stdout.strip().splitlines():
+ if not line or line.endswith(":"):
+ continue
+ key, value = line.split(": ")
+ if not value:
+ continue
+ features[key.strip().replace('-', '_')] = value.strip()
+ data['features'] = features
+
+ args = [ethtool_path, '-T', device]
+ rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace')
+ if rc == 0:
+ data['timestamping'] = [m.lower() for m in re.findall(r'SOF_TIMESTAMPING_(\w+)', stdout)]
+ data['hw_timestamp_filters'] = [m.lower() for m in re.findall(r'HWTSTAMP_FILTER_(\w+)', stdout)]
+ m = re.search(r'PTP Hardware Clock: (\d+)', stdout)
+ if m:
+ data['phc_index'] = int(m.groups()[0])
+
+ return data
+
+
+class LinuxNetworkCollector(NetworkCollector):
+ _platform = 'Linux'
+ _fact_class = LinuxNetwork
+ required_facts = set(['distribution', 'platform'])
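
The IPv4 branch of parse_ip_output derives the netmask and network from the prefix length with bit arithmetic. A worked sketch of just that step (the address and prefix are illustrative):

    import socket
    import struct

    address, netmask_length = '192.168.156.29', '24'  # illustrative /24 address

    address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
    # A /24 mask is the top 24 bits set: 2**32 - 2**(32 - 24) == 0xFFFFFF00
    netmask_bin = (1 << 32) - (1 << 32 >> int(netmask_length))
    netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
    network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))

    print(netmask, network)  # -> 255.255.255.0 192.168.156.0
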
diff --git a/lib/ansible/module_utils/facts/network/netbsd.py b/lib/ansible/module_utils/facts/network/netbsd.py
new file mode 100644
index 0000000..de8ceff
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/netbsd.py
@@ -0,0 +1,48 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class NetBSDNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the NetBSD Network Class.
+ It uses the GenericBsdIfconfigNetwork
+ """
+ platform = 'NetBSD'
+
+ def parse_media_line(self, words, current_if, ips):
+ # example of line:
+ # $ ifconfig
+ # ne0: flags=8863<UP,BROADCAST,NOTRAILERS,RUNNING,SIMPLEX,MULTICAST> mtu 1500
+ # ec_capabilities=1<VLAN_MTU>
+ # ec_enabled=0
+ # address: 00:20:91:45:00:78
+ # media: Ethernet 10baseT full-duplex
+ # inet 192.168.156.29 netmask 0xffffff00 broadcast 192.168.156.255
+ current_if['media'] = words[1]
+ if len(words) > 2:
+ current_if['media_type'] = words[2]
+ if len(words) > 3:
+ current_if['media_options'] = words[3].split(',')
+
+
+class NetBSDNetworkCollector(NetworkCollector):
+ _fact_class = NetBSDNetwork
+ _platform = 'NetBSD'
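
Applied to the `media: Ethernet 10baseT full-duplex` line from the comment above, parse_media_line fills the fields as in this sketch (a plain dict stands in for the interface record):

    words = 'media: Ethernet 10baseT full-duplex'.split()

    current_if = {}
    current_if['media'] = words[1]                         # 'Ethernet'
    if len(words) > 2:
        current_if['media_type'] = words[2]                # '10baseT'
    if len(words) > 3:
        current_if['media_options'] = words[3].split(',')  # ['full-duplex']

    print(current_if)
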
diff --git a/lib/ansible/module_utils/facts/network/nvme.py b/lib/ansible/module_utils/facts/network/nvme.py
new file mode 100644
index 0000000..febd0ab
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/nvme.py
@@ -0,0 +1,57 @@
+# NVMe initiator related facts collection for Ansible.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.facts.network.base import NetworkCollector
+
+
+class NvmeInitiatorNetworkCollector(NetworkCollector):
+ name = 'nvme'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ """
+ Currently NVMe is only supported in some Linux distributions.
+ If NVMe is configured on the host then a file will have been created
+ during the NVMe driver installation. This file holds the unique NQN
+ of the host.
+
+ Example of contents of /etc/nvme/hostnqn:
+
+ # cat /etc/nvme/hostnqn
+ nqn.2014-08.org.nvmexpress:fc_lif:uuid:2cd61a74-17f9-4c22-b350-3020020c458d
+
+ """
+
+ nvme_facts = {}
+ nvme_facts['hostnqn'] = ""
+ if sys.platform.startswith('linux'):
+ for line in get_file_content('/etc/nvme/hostnqn', '').splitlines():
+ if line.startswith('#') or line.startswith(';') or line.strip() == '':
+ continue
+ if line.startswith('nqn.'):
+ nvme_facts['hostnqn'] = line
+ break
+ return nvme_facts
diff --git a/lib/ansible/module_utils/facts/network/openbsd.py b/lib/ansible/module_utils/facts/network/openbsd.py
new file mode 100644
index 0000000..9e11d82
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/openbsd.py
@@ -0,0 +1,42 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class OpenBSDNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the OpenBSD Network Class.
+ It uses the GenericBsdIfconfigNetwork.
+ """
+ platform = 'OpenBSD'
+
+    # OpenBSD's plain 'ifconfig -a' omits alias addresses, so '-A' is added to include them
+ def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
+ return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
+
+ # Return macaddress instead of lladdr
+ def parse_lladdr_line(self, words, current_if, ips):
+ current_if['macaddress'] = words[1]
+ current_if['type'] = 'ether'
+
+
+class OpenBSDNetworkCollector(NetworkCollector):
+ _fact_class = OpenBSDNetwork
+ _platform = 'OpenBSD'
diff --git a/lib/ansible/module_utils/facts/network/sunos.py b/lib/ansible/module_utils/facts/network/sunos.py
new file mode 100644
index 0000000..adba14c
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/sunos.py
@@ -0,0 +1,116 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class SunOSNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the SunOS Network Class.
+ It uses the GenericBsdIfconfigNetwork.
+
+ Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
+ so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
+ """
+ platform = 'SunOS'
+
+ # Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
+ # MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
+ # 'parse_interface_line()' checks for previously seen interfaces before defining
+ # 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
+ def get_interfaces_info(self, ifconfig_path):
+ interfaces = {}
+ current_if = {}
+ ips = dict(
+ all_ipv4_addresses=[],
+ all_ipv6_addresses=[],
+ )
+ rc, out, err = self.module.run_command([ifconfig_path, '-a'])
+
+ for line in out.splitlines():
+
+ if line:
+ words = line.split()
+
+ if re.match(r'^\S', line) and len(words) > 3:
+ current_if = self.parse_interface_line(words, current_if, interfaces)
+ interfaces[current_if['device']] = current_if
+ elif words[0].startswith('options='):
+ self.parse_options_line(words, current_if, ips)
+ elif words[0] == 'nd6':
+ self.parse_nd6_line(words, current_if, ips)
+ elif words[0] == 'ether':
+ self.parse_ether_line(words, current_if, ips)
+ elif words[0] == 'media:':
+ self.parse_media_line(words, current_if, ips)
+ elif words[0] == 'status:':
+ self.parse_status_line(words, current_if, ips)
+ elif words[0] == 'lladdr':
+ self.parse_lladdr_line(words, current_if, ips)
+ elif words[0] == 'inet':
+ self.parse_inet_line(words, current_if, ips)
+ elif words[0] == 'inet6':
+ self.parse_inet6_line(words, current_if, ips)
+ else:
+ self.parse_unknown_line(words, current_if, ips)
+
+ # 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
+ # ipv4/ipv6 lists which is ugly and hard to read.
+ # This quick hack merges the dictionaries. Purely cosmetic.
+ for iface in interfaces:
+ for v in 'ipv4', 'ipv6':
+ combined_facts = {}
+ for facts in interfaces[iface][v]:
+ combined_facts.update(facts)
+ if len(combined_facts.keys()) > 0:
+ interfaces[iface][v] = [combined_facts]
+
+ return interfaces, ips
+
+ def parse_interface_line(self, words, current_if, interfaces):
+ device = words[0][0:-1]
+ if device not in interfaces:
+ current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
+ else:
+ current_if = interfaces[device]
+ flags = self.get_options(words[1])
+ v = 'ipv4'
+ if 'IPv6' in flags:
+ v = 'ipv6'
+ if 'LOOPBACK' in flags:
+ current_if['type'] = 'loopback'
+ current_if[v].append({'flags': flags, 'mtu': words[3]})
+ current_if['macaddress'] = 'unknown' # will be overwritten later
+ return current_if
+
+ # Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
+ # Add leading zero to each octet where needed.
+ def parse_ether_line(self, words, current_if, ips):
+ macaddress = ''
+ for octet in words[1].split(':'):
+ octet = ('0' + octet)[-2:None]
+ macaddress += (octet + ':')
+ current_if['macaddress'] = macaddress[0:-1]
+
+
+class SunOSNetworkCollector(NetworkCollector):
+ _fact_class = SunOSNetwork
+ _platform = 'SunOS'
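
The octet padding in parse_ether_line can be checked against the sample address from its comment; a standalone sketch:

    raw = '0:1:2:d:e:f'  # Solaris-style MAC with single-digit octets

    macaddress = ''
    for octet in raw.split(':'):
        octet = ('0' + octet)[-2:]   # left-pad each octet to two characters
        macaddress += octet + ':'

    print(macaddress[:-1])  # -> 00:01:02:0d:0e:0f
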
diff --git a/lib/ansible/module_utils/facts/other/__init__.py b/lib/ansible/module_utils/facts/other/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/facts/other/__init__.py
diff --git a/lib/ansible/module_utils/facts/other/facter.py b/lib/ansible/module_utils/facts/other/facter.py
new file mode 100644
index 0000000..3f83999
--- /dev/null
+++ b/lib/ansible/module_utils/facts/other/facter.py
@@ -0,0 +1,87 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.namespace import PrefixFactNamespace
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class FacterFactCollector(BaseFactCollector):
+ name = 'facter'
+ _fact_ids = set(['facter']) # type: t.Set[str]
+
+ def __init__(self, collectors=None, namespace=None):
+ namespace = PrefixFactNamespace(namespace_name='facter',
+ prefix='facter_')
+ super(FacterFactCollector, self).__init__(collectors=collectors,
+ namespace=namespace)
+
+ def find_facter(self, module):
+ facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
+ cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
+
+ # Prefer to use cfacter if available
+ if cfacter_path is not None:
+ facter_path = cfacter_path
+
+ return facter_path
+
+ def run_facter(self, module, facter_path):
+        # if facter is installed, and ruby-json is ALSO installed so that
+        # --json works, include the facter data in the collected facts
+ rc, out, err = module.run_command(facter_path + " --puppet --json")
+ return rc, out, err
+
+ def get_facter_output(self, module):
+ facter_path = self.find_facter(module)
+ if not facter_path:
+ return None
+
+ rc, out, err = self.run_facter(module, facter_path)
+
+ if rc != 0:
+ return None
+
+ return out
+
+ def collect(self, module=None, collected_facts=None):
+        # Note that this mirrors previous facter behavior, where there isn't
+        # an 'ansible_facter' key in the main fact dict; instead, 'facter_whatever'
+        # items are added directly to the main dict.
+ facter_dict = {}
+
+ if not module:
+ return facter_dict
+
+ facter_output = self.get_facter_output(module)
+
+        # TODO: if we fail, should we add an empty facter key or nothing?
+ if facter_output is None:
+ return facter_dict
+
+ try:
+ facter_dict = json.loads(facter_output)
+ except Exception:
+ # FIXME: maybe raise a FactCollectorError with some info attrs?
+ pass
+
+ return facter_dict
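
As the comment in collect() notes, facter output lands in the top-level fact dict under facter_-prefixed keys rather than nested under one parent key. A sketch of that effect with hypothetical facter JSON (PrefixFactNamespace performs the equivalent renaming internally):

    import json

    # Hypothetical `facter --puppet --json` output; fact names are illustrative.
    facter_output = '{"kernel": "Linux", "memorysize_mb": 7821.45}'

    facter_dict = json.loads(facter_output)

    # The 'facter_' prefix namespace turns each top-level key into
    # facter_<name> in the final fact dict.
    namespaced = dict(('facter_%s' % k, v) for k, v in facter_dict.items())
    print(namespaced)  # {'facter_kernel': 'Linux', 'facter_memorysize_mb': 7821.45}
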
diff --git a/lib/ansible/module_utils/facts/other/ohai.py b/lib/ansible/module_utils/facts/other/ohai.py
new file mode 100644
index 0000000..90c5539
--- /dev/null
+++ b/lib/ansible/module_utils/facts/other/ohai.py
@@ -0,0 +1,74 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.namespace import PrefixFactNamespace
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class OhaiFactCollector(BaseFactCollector):
+ '''This is a subclass of Facts for including information gathered from Ohai.'''
+ name = 'ohai'
+ _fact_ids = set() # type: t.Set[str]
+
+ def __init__(self, collectors=None, namespace=None):
+ namespace = PrefixFactNamespace(namespace_name='ohai',
+ prefix='ohai_')
+ super(OhaiFactCollector, self).__init__(collectors=collectors,
+ namespace=namespace)
+
+ def find_ohai(self, module):
+ ohai_path = module.get_bin_path('ohai')
+ return ohai_path
+
+    def run_ohai(self, module, ohai_path):
+ rc, out, err = module.run_command(ohai_path)
+ return rc, out, err
+
+ def get_ohai_output(self, module):
+ ohai_path = self.find_ohai(module)
+ if not ohai_path:
+ return None
+
+ rc, out, err = self.run_ohai(module, ohai_path)
+ if rc != 0:
+ return None
+
+ return out
+
+ def collect(self, module=None, collected_facts=None):
+ ohai_facts = {}
+ if not module:
+ return ohai_facts
+
+ ohai_output = self.get_ohai_output(module)
+
+ if ohai_output is None:
+ return ohai_facts
+
+ try:
+ ohai_facts = json.loads(ohai_output)
+ except Exception:
+ # FIXME: useful error, logging, something...
+ pass
+
+ return ohai_facts
diff --git a/lib/ansible/module_utils/facts/packages.py b/lib/ansible/module_utils/facts/packages.py
new file mode 100644
index 0000000..53f74a1
--- /dev/null
+++ b/lib/ansible/module_utils/facts/packages.py
@@ -0,0 +1,86 @@
+# (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from abc import ABCMeta, abstractmethod
+
+from ansible.module_utils.six import with_metaclass
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common._utils import get_all_subclasses
+
+
+def get_all_pkg_managers():
+
+ return {obj.__name__.lower(): obj for obj in get_all_subclasses(PkgMgr) if obj not in (CLIMgr, LibMgr)}
+
+
+class PkgMgr(with_metaclass(ABCMeta, object)): # type: ignore[misc]
+
+ @abstractmethod
+ def is_available(self):
+        # This method should return True/False indicating whether the package manager is currently installed/usable
+ # It can also 'prep' the required systems in the process of detecting availability
+ pass
+
+ @abstractmethod
+ def list_installed(self):
+ # This method should return a list of installed packages, each list item will be passed to get_package_details
+ pass
+
+ @abstractmethod
+ def get_package_details(self, package):
+        # This takes a 'package' item and returns a dictionary with the package information; 'name' and 'version' are the minimal required keys
+ pass
+
+ def get_packages(self):
+ # Take all of the above and return a dictionary of lists of dictionaries (package = list of installed versions)
+
+ installed_packages = {}
+ for package in self.list_installed():
+ package_details = self.get_package_details(package)
+ if 'source' not in package_details:
+ package_details['source'] = self.__class__.__name__.lower()
+ name = package_details['name']
+ if name not in installed_packages:
+ installed_packages[name] = [package_details]
+ else:
+ installed_packages[name].append(package_details)
+ return installed_packages
+
+
+class LibMgr(PkgMgr):
+
+ LIB = None # type: str | None
+
+ def __init__(self):
+
+ self._lib = None
+ super(LibMgr, self).__init__()
+
+ def is_available(self):
+ found = False
+ try:
+ self._lib = __import__(self.LIB)
+ found = True
+ except ImportError:
+ pass
+ return found
+
+
+class CLIMgr(PkgMgr):
+
+ CLI = None # type: str | None
+
+ def __init__(self):
+
+ self._cli = None
+ super(CLIMgr, self).__init__()
+
+ def is_available(self):
+ try:
+ self._cli = get_bin_path(self.CLI)
+ except ValueError:
+ return False
+ return True
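
A minimal sketch of plugging a new manager into this API: a hypothetical CLIMgr subclass needs only its binary name plus the two listing methods. The 'fakepkg' command and its one-package-per-line 'name version' output format are invented for illustration; a real subclass would run the command through an AnsibleModule as shown.

    class FakePkgMgr(CLIMgr):
        """Hypothetical manager for a CLI that prints one 'name version' per line."""

        CLI = 'fakepkg'

        def __init__(self, module):
            super(FakePkgMgr, self).__init__()
            self.module = module

        def list_installed(self):
            # is_available() has already resolved self._cli via get_bin_path()
            rc, out, err = self.module.run_command([self._cli, 'list'])
            return out.splitlines() if rc == 0 else []

        def get_package_details(self, package):
            name, version = package.split(None, 1)
            return {'name': name, 'version': version}

Once defined, get_all_pkg_managers() picks the subclass up automatically, and the inherited get_packages() groups every installed version under the package name with 'source' defaulting to 'fakepkgmgr'.
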
diff --git a/lib/ansible/module_utils/facts/sysctl.py b/lib/ansible/module_utils/facts/sysctl.py
new file mode 100644
index 0000000..2c55d77
--- /dev/null
+++ b/lib/ansible/module_utils/facts/sysctl.py
@@ -0,0 +1,62 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils._text import to_text
+
+
+def get_sysctl(module, prefixes):
+ sysctl_cmd = module.get_bin_path('sysctl')
+ cmd = [sysctl_cmd]
+ cmd.extend(prefixes)
+
+ sysctl = dict()
+
+ try:
+ rc, out, err = module.run_command(cmd)
+ except (IOError, OSError) as e:
+ module.warn('Unable to read sysctl: %s' % to_text(e))
+ rc = 1
+
+ if rc == 0:
+ key = ''
+ value = ''
+ for line in out.splitlines():
+ if not line.strip():
+ continue
+
+ if line.startswith(' '):
+ # handle multiline values, they will not have a starting key
+ # Add the newline back in so people can split on it to parse
+ # lines if they need to.
+ value += '\n' + line
+ continue
+
+ if key:
+ sysctl[key] = value.strip()
+
+ try:
+ (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
+ except Exception as e:
+ module.warn('Unable to split sysctl line (%s): %s' % (to_text(line), to_text(e)))
+
+ if key:
+ sysctl[key] = value.strip()
+
+ return sysctl
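
The split pattern accepts both separator styles ('key = value' on Linux, 'key: value' on the BSDs), and the leading-space check stitches continuation lines back onto the previous key. A sketch over illustrative output:

    import re

    sample = (
        "kernel.ostype = Linux\n"       # Linux-style separator
        "kern.version: NetBSD 9.0\n"    # BSD-style separator...
        "    (GENERIC)\n"               # ...with a continuation line
    )

    sysctl, key, value = {}, '', ''
    for line in sample.splitlines():
        if not line.strip():
            continue
        if line.startswith(' '):
            value += '\n' + line        # multiline value; no key on this line
            continue
        if key:
            sysctl[key] = value.strip()
        key, value = re.split(r'\s?=\s?|: ', line, maxsplit=1)
    if key:
        sysctl[key] = value.strip()

    print(sysctl['kern.version'])       # value keeps its embedded newline
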
diff --git a/lib/ansible/module_utils/facts/system/__init__.py b/lib/ansible/module_utils/facts/system/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/__init__.py
diff --git a/lib/ansible/module_utils/facts/system/apparmor.py b/lib/ansible/module_utils/facts/system/apparmor.py
new file mode 100644
index 0000000..3b702f9
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/apparmor.py
@@ -0,0 +1,41 @@
+# Collect facts related to apparmor
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class ApparmorFactCollector(BaseFactCollector):
+ name = 'apparmor'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+ apparmor_facts = {}
+ if os.path.exists('/sys/kernel/security/apparmor'):
+ apparmor_facts['status'] = 'enabled'
+ else:
+ apparmor_facts['status'] = 'disabled'
+
+ facts_dict['apparmor'] = apparmor_facts
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/system/caps.py b/lib/ansible/module_utils/facts/system/caps.py
new file mode 100644
index 0000000..6a1e26d
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/caps.py
@@ -0,0 +1,62 @@
+# Collect facts related to systems 'capabilities' via capsh
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class SystemCapabilitiesFactCollector(BaseFactCollector):
+ name = 'caps'
+ _fact_ids = set(['system_capabilities',
+ 'system_capabilities_enforced']) # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+
+ rc = -1
+ facts_dict = {'system_capabilities_enforced': 'N/A',
+ 'system_capabilities': 'N/A'}
+ if module:
+ capsh_path = module.get_bin_path('capsh')
+ if capsh_path:
+ # NOTE: -> get_caps_data()/parse_caps_data() for easier mocking -akl
+ try:
+ rc, out, err = module.run_command([capsh_path, "--print"], errors='surrogate_then_replace', handle_exceptions=False)
+ except (IOError, OSError) as e:
+ module.warn('Could not query system capabilities: %s' % str(e))
+
+ if rc == 0:
+ enforced_caps = []
+ enforced = 'NA'
+ for line in out.splitlines():
+ if len(line) < 1:
+ continue
+ if line.startswith('Current:'):
+ if line.split(':')[1].strip() == '=ep':
+ enforced = 'False'
+ else:
+ enforced = 'True'
+ enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]
+
+ facts_dict['system_capabilities_enforced'] = enforced
+ facts_dict['system_capabilities'] = enforced_caps
+
+ return facts_dict
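
The 'Current:' handling treats a bare '=ep' (the full, unrestricted capability set) as unenforced and anything else as a restricted, enforced set. A sketch on an illustrative capsh line:

    line = 'Current: = cap_chown,cap_net_admin+ep'  # illustrative `capsh --print` line

    if line.split(':')[1].strip() == '=ep':
        enforced, enforced_caps = 'False', []   # full set: nothing is restricted
    else:
        enforced = 'True'
        enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]

    print(enforced, enforced_caps)  # -> True ['cap_chown', 'cap_net_admin+ep']
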
diff --git a/lib/ansible/module_utils/facts/system/chroot.py b/lib/ansible/module_utils/facts/system/chroot.py
new file mode 100644
index 0000000..94138a0
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/chroot.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+def is_chroot(module=None):
+
+ is_chroot = None
+
+ if os.environ.get('debian_chroot', False):
+ is_chroot = True
+ else:
+ my_root = os.stat('/')
+ try:
+            # check whether our root is the same filesystem object as PID 1's root
+ proc_root = os.stat('/proc/1/root/.')
+ is_chroot = my_root.st_ino != proc_root.st_ino or my_root.st_dev != proc_root.st_dev
+ except Exception:
+            # not running as root, or /proc is unavailable; fall back to checking the root inode number
+ fs_root_ino = 2
+
+ if module is not None:
+ stat_path = module.get_bin_path('stat')
+ if stat_path:
+ cmd = [stat_path, '-f', '--format=%T', '/']
+ rc, out, err = module.run_command(cmd)
+ if 'btrfs' in out:
+ fs_root_ino = 256
+ elif 'xfs' in out:
+ fs_root_ino = 128
+
+ is_chroot = (my_root.st_ino != fs_root_ino)
+
+ return is_chroot
+
+
+class ChrootFactCollector(BaseFactCollector):
+ name = 'chroot'
+ _fact_ids = set(['is_chroot']) # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ return {'is_chroot': is_chroot(module)}
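
The primary check compares the device and inode of our '/' with those of PID 1's root; outside a chroot they refer to the same object. A standalone sketch of that comparison (statting /proc/1/root usually requires root):

    import os

    my_root = os.stat('/')
    try:
        proc_root = os.stat('/proc/1/root/.')
        in_chroot = (my_root.st_ino != proc_root.st_ino or
                     my_root.st_dev != proc_root.st_dev)
        print(in_chroot)
    except OSError:
        # unprivileged, or no /proc: the collector then falls back to the
        # filesystem-specific root inode check shown above
        pass
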
diff --git a/lib/ansible/module_utils/facts/system/cmdline.py b/lib/ansible/module_utils/facts/system/cmdline.py
new file mode 100644
index 0000000..782186d
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/cmdline.py
@@ -0,0 +1,81 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import shlex
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.utils import get_file_content
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class CmdLineFactCollector(BaseFactCollector):
+ name = 'cmdline'
+ _fact_ids = set() # type: t.Set[str]
+
+ def _get_proc_cmdline(self):
+ return get_file_content('/proc/cmdline')
+
+ def _parse_proc_cmdline(self, data):
+ cmdline_dict = {}
+ try:
+ for piece in shlex.split(data, posix=False):
+ item = piece.split('=', 1)
+ if len(item) == 1:
+ cmdline_dict[item[0]] = True
+ else:
+ cmdline_dict[item[0]] = item[1]
+ except ValueError:
+ pass
+
+ return cmdline_dict
+
+ def _parse_proc_cmdline_facts(self, data):
+ cmdline_dict = {}
+ try:
+ for piece in shlex.split(data, posix=False):
+ item = piece.split('=', 1)
+ if len(item) == 1:
+ cmdline_dict[item[0]] = True
+ else:
+ if item[0] in cmdline_dict:
+ if isinstance(cmdline_dict[item[0]], list):
+ cmdline_dict[item[0]].append(item[1])
+ else:
+ new_list = [cmdline_dict[item[0]], item[1]]
+ cmdline_dict[item[0]] = new_list
+ else:
+ cmdline_dict[item[0]] = item[1]
+ except ValueError:
+ pass
+
+ return cmdline_dict
+
+ def collect(self, module=None, collected_facts=None):
+ cmdline_facts = {}
+
+ data = self._get_proc_cmdline()
+
+ if not data:
+ return cmdline_facts
+
+ cmdline_facts['cmdline'] = self._parse_proc_cmdline(data)
+ cmdline_facts['proc_cmdline'] = self._parse_proc_cmdline_facts(data)
+
+ return cmdline_facts
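
The two parsers differ only in how repeated keys are handled: 'cmdline' keeps the last value, while 'proc_cmdline' accumulates a list. A sketch of the list-building variant on an illustrative kernel command line:

    import shlex

    data = 'BOOT_IMAGE=/vmlinuz ro console=tty0 console=ttyS0,115200'

    facts = {}
    for piece in shlex.split(data, posix=False):
        item = piece.split('=', 1)
        if len(item) == 1:
            facts[item[0]] = True                    # bare flags such as 'ro'
        elif item[0] in facts:                       # repeated key -> list
            if isinstance(facts[item[0]], list):
                facts[item[0]].append(item[1])
            else:
                facts[item[0]] = [facts[item[0]], item[1]]
        else:
            facts[item[0]] = item[1]

    print(facts['console'])  # -> ['tty0', 'ttyS0,115200']
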
diff --git a/lib/ansible/module_utils/facts/system/date_time.py b/lib/ansible/module_utils/facts/system/date_time.py
new file mode 100644
index 0000000..481bef4
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/date_time.py
@@ -0,0 +1,70 @@
+# Date and time related facts collection for ansible.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import time
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class DateTimeFactCollector(BaseFactCollector):
+ name = 'date_time'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+ date_time_facts = {}
+
+ # Store the timestamp once, then get local and UTC versions from that
+ epoch_ts = time.time()
+ now = datetime.datetime.fromtimestamp(epoch_ts)
+ utcnow = datetime.datetime.utcfromtimestamp(epoch_ts)
+
+ date_time_facts['year'] = now.strftime('%Y')
+ date_time_facts['month'] = now.strftime('%m')
+ date_time_facts['weekday'] = now.strftime('%A')
+ date_time_facts['weekday_number'] = now.strftime('%w')
+ date_time_facts['weeknumber'] = now.strftime('%W')
+ date_time_facts['day'] = now.strftime('%d')
+ date_time_facts['hour'] = now.strftime('%H')
+ date_time_facts['minute'] = now.strftime('%M')
+ date_time_facts['second'] = now.strftime('%S')
+ date_time_facts['epoch'] = now.strftime('%s')
+        # strftime('%s') is a glibc extension; on other platforms it may yield
+        # an empty string or the unexpanded directive
+ if date_time_facts['epoch'] == '' or date_time_facts['epoch'][0] == '%':
+ date_time_facts['epoch'] = str(int(epoch_ts))
+        # epoch_int always contains the integer form of the epoch
+ date_time_facts['epoch_int'] = str(int(now.strftime('%s')))
+ if date_time_facts['epoch_int'] == '' or date_time_facts['epoch_int'][0] == '%':
+ date_time_facts['epoch_int'] = str(int(epoch_ts))
+ date_time_facts['date'] = now.strftime('%Y-%m-%d')
+ date_time_facts['time'] = now.strftime('%H:%M:%S')
+ date_time_facts['iso8601_micro'] = utcnow.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+ date_time_facts['iso8601'] = utcnow.strftime("%Y-%m-%dT%H:%M:%SZ")
+ date_time_facts['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
+ date_time_facts['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
+ date_time_facts['tz'] = time.strftime("%Z")
+ date_time_facts['tz_dst'] = time.tzname[1]
+ date_time_facts['tz_offset'] = time.strftime("%z")
+
+ facts_dict['date_time'] = date_time_facts
+ return facts_dict
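
Since strftime('%s') is a glibc extension, both epoch fields above are guarded. A sketch of the portable fallback:

    import datetime
    import time

    epoch_ts = time.time()
    now = datetime.datetime.fromtimestamp(epoch_ts)

    epoch = now.strftime('%s')          # may be '' or a literal '%s' off glibc
    if epoch == '' or epoch[0] == '%':
        epoch = str(int(epoch_ts))      # portable fallback used by the collector

    print(epoch)
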
diff --git a/lib/ansible/module_utils/facts/system/distribution.py b/lib/ansible/module_utils/facts/system/distribution.py
new file mode 100644
index 0000000..dcb6e5a
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/distribution.py
@@ -0,0 +1,726 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import platform
+import re
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.common.sys_info import get_distribution, get_distribution_version, \
+ get_distribution_codename
+from ansible.module_utils.facts.utils import get_file_content, get_file_lines
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+def get_uname(module, flags=('-v')):
+ if isinstance(flags, str):
+ flags = flags.split()
+ command = ['uname']
+ command.extend(flags)
+ rc, out, err = module.run_command(command)
+ if rc == 0:
+ return out
+ return None
+
+
+def _file_exists(path, allow_empty=False):
+ # not finding the file, exit early
+ if not os.path.exists(path):
+ return False
+
+    # if the path just needs to exist (i.e., it may be empty), we are done
+ if allow_empty:
+ return True
+
+    # file exists but is empty and we don't allow_empty
+ if os.path.getsize(path) == 0:
+ return False
+
+ # file exists with some content
+ return True
+
+
+class DistributionFiles:
+    '''Holds the various distro file parsers (os-release, etc) and the logic for finding the right one.'''
+    # every distribution name mentioned here must have one of:
+    # - allowempty == True
+    # - be listed in SEARCH_STRING
+    # - have a parse_distribution_file_DISTNAME function implemented
+    # keep the names in sync with the Conditionals page of the docs
+ OSDIST_LIST = (
+ {'path': '/etc/altlinux-release', 'name': 'Altlinux'},
+ {'path': '/etc/oracle-release', 'name': 'OracleLinux'},
+ {'path': '/etc/slackware-version', 'name': 'Slackware'},
+ {'path': '/etc/centos-release', 'name': 'CentOS'},
+ {'path': '/etc/redhat-release', 'name': 'RedHat'},
+ {'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
+ {'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
+ {'path': '/etc/os-release', 'name': 'Amazon'},
+ {'path': '/etc/system-release', 'name': 'Amazon'},
+ {'path': '/etc/alpine-release', 'name': 'Alpine'},
+ {'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
+ {'path': '/etc/os-release', 'name': 'Archlinux'},
+ {'path': '/etc/os-release', 'name': 'SUSE'},
+ {'path': '/etc/SuSE-release', 'name': 'SUSE'},
+ {'path': '/etc/gentoo-release', 'name': 'Gentoo'},
+ {'path': '/etc/os-release', 'name': 'Debian'},
+ {'path': '/etc/lsb-release', 'name': 'Debian'},
+ {'path': '/etc/lsb-release', 'name': 'Mandriva'},
+ {'path': '/etc/sourcemage-release', 'name': 'SMGL'},
+ {'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
+ {'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
+ {'path': '/etc/os-release', 'name': 'Flatcar'},
+ {'path': '/etc/os-release', 'name': 'NA'},
+ )
+
+ SEARCH_STRING = {
+ 'OracleLinux': 'Oracle Linux',
+ 'RedHat': 'Red Hat',
+ 'Altlinux': 'ALT',
+ 'SMGL': 'Source Mage GNU/Linux',
+ }
+
+ # We can't include this in SEARCH_STRING because a name match on its keys
+ # causes a fallback to using the first whitespace separated item from the file content
+ # as the name. For os-release, that is in form 'NAME=Arch'
+ OS_RELEASE_ALIAS = {
+ 'Archlinux': 'Arch Linux'
+ }
+
+ STRIP_QUOTES = r'\'\"\\'
+
+ def __init__(self, module):
+ self.module = module
+
+ def _get_file_content(self, path):
+ return get_file_content(path)
+
+ def _get_dist_file_content(self, path, allow_empty=False):
+        # can't find that dist file, or it is unexpectedly empty
+ if not _file_exists(path, allow_empty=allow_empty):
+ return False, None
+
+ data = self._get_file_content(path)
+ return True, data
+
+ def _parse_dist_file(self, name, dist_file_content, path, collected_facts):
+ dist_file_dict = {}
+ dist_file_content = dist_file_content.strip(DistributionFiles.STRIP_QUOTES)
+ if name in self.SEARCH_STRING:
+ # look for the distribution string in the data and replace according to RELEASE_NAME_MAP
+ # only the distribution name is set, the version is assumed to be correct from distro.linux_distribution()
+ if self.SEARCH_STRING[name] in dist_file_content:
+ # this sets distribution=RedHat if 'Red Hat' shows up in data
+ dist_file_dict['distribution'] = name
+ dist_file_dict['distribution_file_search_string'] = self.SEARCH_STRING[name]
+ else:
+ # this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
+ dist_file_dict['distribution'] = dist_file_content.split()[0]
+
+ return True, dist_file_dict
+
+ if name in self.OS_RELEASE_ALIAS:
+ if self.OS_RELEASE_ALIAS[name] in dist_file_content:
+ dist_file_dict['distribution'] = name
+ return True, dist_file_dict
+ return False, dist_file_dict
+
+ # call a dedicated function for parsing the file content
+ # TODO: replace with a map or a class
+ try:
+            # FIXME: most of these don't actually look at the dist file contents, but at random other stuff
+ distfunc_name = 'parse_distribution_file_' + name
+ distfunc = getattr(self, distfunc_name)
+ parsed, dist_file_dict = distfunc(name, dist_file_content, path, collected_facts)
+ return parsed, dist_file_dict
+ except AttributeError as exc:
+ self.module.debug('exc: %s' % exc)
+            # this should never happen, but if it does, fail quietly rather than with a traceback
+ return False, dist_file_dict
+
+ # to debug multiple matching release files, one can use:
+ # self.facts['distribution_debug'].append({path + ' ' + name:
+ # (parsed,
+ # self.facts['distribution'],
+ # self.facts['distribution_version'],
+ # self.facts['distribution_release'],
+ # )})
+
+ def _guess_distribution(self):
+ # try to find out which linux distribution this is
+ dist = (get_distribution(), get_distribution_version(), get_distribution_codename())
+ distribution_guess = {
+ 'distribution': dist[0] or 'NA',
+ 'distribution_version': dist[1] or 'NA',
+ # distribution_release can be the empty string
+ 'distribution_release': 'NA' if dist[2] is None else dist[2]
+ }
+
+ distribution_guess['distribution_major_version'] = distribution_guess['distribution_version'].split('.')[0] or 'NA'
+ return distribution_guess
+
+ def process_dist_files(self):
+ # Try to handle the exceptions now ...
+ # self.facts['distribution_debug'] = []
+ dist_file_facts = {}
+
+ dist_guess = self._guess_distribution()
+ dist_file_facts.update(dist_guess)
+
+ for ddict in self.OSDIST_LIST:
+ name = ddict['name']
+ path = ddict['path']
+ allow_empty = ddict.get('allowempty', False)
+
+ has_dist_file, dist_file_content = self._get_dist_file_content(path, allow_empty=allow_empty)
+
+            # an empty dist file can still match when allow_empty is set; for example,
+            # Arch Linux ships an empty /etc/arch-release alongside an
+            # /etc/os-release with a different name
+ if has_dist_file and allow_empty:
+ dist_file_facts['distribution'] = name
+ dist_file_facts['distribution_file_path'] = path
+ dist_file_facts['distribution_file_variety'] = name
+ break
+
+ if not has_dist_file:
+ # keep looking
+ continue
+
+ parsed_dist_file, parsed_dist_file_facts = self._parse_dist_file(name, dist_file_content, path, dist_file_facts)
+
+ # finally found the right os dist file and were able to parse it
+ if parsed_dist_file:
+ dist_file_facts['distribution'] = name
+ dist_file_facts['distribution_file_path'] = path
+ # distribution and file_variety are the same here, but distribution
+ # will be changed/mapped to a more specific name.
+ # ie, dist=Fedora, file_variety=RedHat
+ dist_file_facts['distribution_file_variety'] = name
+ dist_file_facts['distribution_file_parsed'] = parsed_dist_file
+ dist_file_facts.update(parsed_dist_file_facts)
+ break
+
+ return dist_file_facts
+
+ # TODO: FIXME: split distro file parsing into its own module or class
+ def parse_distribution_file_Slackware(self, name, data, path, collected_facts):
+ slackware_facts = {}
+ if 'Slackware' not in data:
+ return False, slackware_facts # TODO: remove
+ slackware_facts['distribution'] = name
+ version = re.findall(r'\w+[.]\w+\+?', data)
+ if version:
+ slackware_facts['distribution_version'] = version[0]
+ return True, slackware_facts
+
+ def parse_distribution_file_Amazon(self, name, data, path, collected_facts):
+ amazon_facts = {}
+ if 'Amazon' not in data:
+ return False, amazon_facts
+ amazon_facts['distribution'] = 'Amazon'
+ if path == '/etc/os-release':
+ version = re.search(r"VERSION_ID=\"(.*)\"", data)
+ if version:
+ distribution_version = version.group(1)
+ amazon_facts['distribution_version'] = distribution_version
+ version_data = distribution_version.split(".")
+ if len(version_data) > 1:
+ major, minor = version_data
+ else:
+ major, minor = version_data[0], 'NA'
+
+ amazon_facts['distribution_major_version'] = major
+ amazon_facts['distribution_minor_version'] = minor
+ else:
+ version = [n for n in data.split() if n.isdigit()]
+ version = version[0] if version else 'NA'
+ amazon_facts['distribution_version'] = version
+
+ return True, amazon_facts
+
+ def parse_distribution_file_OpenWrt(self, name, data, path, collected_facts):
+ openwrt_facts = {}
+ if 'OpenWrt' not in data:
+ return False, openwrt_facts # TODO: remove
+ openwrt_facts['distribution'] = name
+ version = re.search('DISTRIB_RELEASE="(.*)"', data)
+ if version:
+ openwrt_facts['distribution_version'] = version.groups()[0]
+ release = re.search('DISTRIB_CODENAME="(.*)"', data)
+ if release:
+ openwrt_facts['distribution_release'] = release.groups()[0]
+ return True, openwrt_facts
+
+ def parse_distribution_file_Alpine(self, name, data, path, collected_facts):
+ alpine_facts = {}
+ alpine_facts['distribution'] = 'Alpine'
+ alpine_facts['distribution_version'] = data
+ return True, alpine_facts
+
+ def parse_distribution_file_SUSE(self, name, data, path, collected_facts):
+ suse_facts = {}
+ if 'suse' not in data.lower():
+ return False, suse_facts # TODO: remove if tested without this
+ if path == '/etc/os-release':
+ for line in data.splitlines():
+ distribution = re.search("^NAME=(.*)", line)
+ if distribution:
+ suse_facts['distribution'] = distribution.group(1).strip('"')
+                # example patterns: 13.04, 13.0, 13
+ distribution_version = re.search(r'^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
+ if distribution_version:
+ suse_facts['distribution_version'] = distribution_version.group(1)
+ suse_facts['distribution_major_version'] = distribution_version.group(1).split('.')[0]
+ if 'open' in data.lower():
+ release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
+ if release:
+ suse_facts['distribution_release'] = release.groups()[0]
+ elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
+                    # SLES doesn't have fancy release names
+                    release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
+                    if release is not None and release.group(1):
+ release = release.group(1)
+ else:
+ release = "0" # no minor number, so it is the first release
+ suse_facts['distribution_release'] = release
+ elif path == '/etc/SuSE-release':
+ if 'open' in data.lower():
+ data = data.splitlines()
+ distdata = get_file_content(path).splitlines()[0]
+ suse_facts['distribution'] = distdata.split()[0]
+ for line in data:
+ release = re.search('CODENAME *= *([^\n]+)', line)
+ if release:
+ suse_facts['distribution_release'] = release.groups()[0].strip()
+ elif 'enterprise' in data.lower():
+ lines = data.splitlines()
+ distribution = lines[0].split()[0]
+ if "Server" in data:
+ suse_facts['distribution'] = "SLES"
+ elif "Desktop" in data:
+ suse_facts['distribution'] = "SLED"
+ for line in lines:
+                    release = re.search('PATCHLEVEL = ([0-9]+)', line)  # SLES doesn't have fancy release names
+ if release:
+ suse_facts['distribution_release'] = release.group(1)
+ suse_facts['distribution_version'] = collected_facts['distribution_version'] + '.' + release.group(1)
+
+ # See https://www.suse.com/support/kb/doc/?id=000019341 for SLES for SAP
+ if os.path.islink('/etc/products.d/baseproduct') and os.path.realpath('/etc/products.d/baseproduct').endswith('SLES_SAP.prod'):
+ suse_facts['distribution'] = 'SLES_SAP'
+
+ return True, suse_facts
+
+ def parse_distribution_file_Debian(self, name, data, path, collected_facts):
+ debian_facts = {}
+ if 'Debian' in data or 'Raspbian' in data:
+ debian_facts['distribution'] = 'Debian'
+ release = re.search(r"PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
+ if release:
+ debian_facts['distribution_release'] = release.groups()[0]
+
+            # Last resort: try to find the release from tzdata, as either lsb is missing or this is a very old Debian
+ if collected_facts['distribution_release'] == 'NA' and 'Debian' in data:
+ dpkg_cmd = self.module.get_bin_path('dpkg')
+ if dpkg_cmd:
+ cmd = "%s --status tzdata|grep Provides|cut -f2 -d'-'" % dpkg_cmd
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ debian_facts['distribution_release'] = out.strip()
+ debian_version_path = '/etc/debian_version'
+ distdata = get_file_lines(debian_version_path)
+ for line in distdata:
+ m = re.search(r'(\d+)\.(\d+)', line.strip())
+ if m:
+ debian_facts['distribution_minor_version'] = m.groups()[1]
+ elif 'Ubuntu' in data:
+ debian_facts['distribution'] = 'Ubuntu'
+ # nothing else to do, Ubuntu gets correct info from python functions
+ elif 'SteamOS' in data:
+ debian_facts['distribution'] = 'SteamOS'
+ # nothing else to do, SteamOS gets correct info from python functions
+ elif path in ('/etc/lsb-release', '/etc/os-release') and ('Kali' in data or 'Parrot' in data):
+ if 'Kali' in data:
+ # Kali does not provide /etc/lsb-release anymore
+ debian_facts['distribution'] = 'Kali'
+ elif 'Parrot' in data:
+ debian_facts['distribution'] = 'Parrot'
+ release = re.search('DISTRIB_RELEASE=(.*)', data)
+ if release:
+ debian_facts['distribution_release'] = release.groups()[0]
+ elif 'Devuan' in data:
+ debian_facts['distribution'] = 'Devuan'
+ release = re.search(r"PRETTY_NAME=\"?[^(\"]+ \(?([^) \"]+)\)?", data)
+ if release:
+ debian_facts['distribution_release'] = release.groups()[0]
+ version = re.search(r"VERSION_ID=\"(.*)\"", data)
+ if version:
+ debian_facts['distribution_version'] = version.group(1)
+ debian_facts['distribution_major_version'] = version.group(1)
+ elif 'Cumulus' in data:
+ debian_facts['distribution'] = 'Cumulus Linux'
+ version = re.search(r"VERSION_ID=(.*)", data)
+ if version:
+ major, _minor, _dummy_ver = version.group(1).split(".")
+ debian_facts['distribution_version'] = version.group(1)
+ debian_facts['distribution_major_version'] = major
+
+ release = re.search(r'VERSION="(.*)"', data)
+ if release:
+ debian_facts['distribution_release'] = release.groups()[0]
+ elif "Mint" in data:
+ debian_facts['distribution'] = 'Linux Mint'
+ version = re.search(r"VERSION_ID=\"(.*)\"", data)
+ if version:
+ debian_facts['distribution_version'] = version.group(1)
+ debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
+ elif 'UOS' in data or 'Uos' in data or 'uos' in data:
+ debian_facts['distribution'] = 'Uos'
+ release = re.search(r"VERSION_CODENAME=\"?([^\"]+)\"?", data)
+ if release:
+ debian_facts['distribution_release'] = release.groups()[0]
+ version = re.search(r"VERSION_ID=\"(.*)\"", data)
+ if version:
+ debian_facts['distribution_version'] = version.group(1)
+ debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
+ elif 'Deepin' in data or 'deepin' in data:
+ debian_facts['distribution'] = 'Deepin'
+ release = re.search(r"VERSION_CODENAME=\"?([^\"]+)\"?", data)
+ if release:
+ debian_facts['distribution_release'] = release.groups()[0]
+ version = re.search(r"VERSION_ID=\"(.*)\"", data)
+ if version:
+ debian_facts['distribution_version'] = version.group(1)
+ debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
+ else:
+ return False, debian_facts
+
+ return True, debian_facts
+
+ def parse_distribution_file_Mandriva(self, name, data, path, collected_facts):
+ mandriva_facts = {}
+ if 'Mandriva' in data:
+ mandriva_facts['distribution'] = 'Mandriva'
+ version = re.search('DISTRIB_RELEASE="(.*)"', data)
+ if version:
+ mandriva_facts['distribution_version'] = version.groups()[0]
+ release = re.search('DISTRIB_CODENAME="(.*)"', data)
+ if release:
+ mandriva_facts['distribution_release'] = release.groups()[0]
+ mandriva_facts['distribution'] = name
+ else:
+ return False, mandriva_facts
+
+ return True, mandriva_facts
+
+ def parse_distribution_file_NA(self, name, data, path, collected_facts):
+ na_facts = {}
+ for line in data.splitlines():
+ distribution = re.search("^NAME=(.*)", line)
+ if distribution and name == 'NA':
+ na_facts['distribution'] = distribution.group(1).strip('"')
+ version = re.search("^VERSION=(.*)", line)
+ if version and collected_facts['distribution_version'] == 'NA':
+ na_facts['distribution_version'] = version.group(1).strip('"')
+ return True, na_facts
+
+ def parse_distribution_file_Coreos(self, name, data, path, collected_facts):
+ coreos_facts = {}
+ # FIXME: pass in ro copy of facts for this kind of thing
+ distro = get_distribution()
+
+ if distro.lower() == 'coreos':
+ if not data:
+ # include fix from #15230, #15228
+ # TODO: verify this is ok for above bugs
+ return False, coreos_facts
+ release = re.search("^GROUP=(.*)", data)
+ if release:
+ coreos_facts['distribution_release'] = release.group(1).strip('"')
+ else:
+ return False, coreos_facts # TODO: remove if tested without this
+
+ return True, coreos_facts
+
+ def parse_distribution_file_Flatcar(self, name, data, path, collected_facts):
+ flatcar_facts = {}
+ distro = get_distribution()
+
+ if distro.lower() != 'flatcar':
+ return False, flatcar_facts
+
+ if not data:
+ return False, flatcar_facts
+
+ version = re.search("VERSION=(.*)", data)
+ if version:
+ flatcar_facts['distribution_major_version'] = version.group(1).strip('"').split('.')[0]
+ flatcar_facts['distribution_version'] = version.group(1).strip('"')
+
+ return True, flatcar_facts
+
+ def parse_distribution_file_ClearLinux(self, name, data, path, collected_facts):
+ clear_facts = {}
+ if "clearlinux" not in name.lower():
+ return False, clear_facts
+
+ pname = re.search('NAME="(.*)"', data)
+ if pname:
+ if 'Clear Linux' not in pname.groups()[0]:
+ return False, clear_facts
+ clear_facts['distribution'] = pname.groups()[0]
+ version = re.search('VERSION_ID=(.*)', data)
+ if version:
+ clear_facts['distribution_major_version'] = version.groups()[0]
+ clear_facts['distribution_version'] = version.groups()[0]
+ release = re.search('ID=(.*)', data)
+ if release:
+ clear_facts['distribution_release'] = release.groups()[0]
+ return True, clear_facts
+
+ def parse_distribution_file_CentOS(self, name, data, path, collected_facts):
+ centos_facts = {}
+
+ if 'CentOS Stream' in data:
+ centos_facts['distribution_release'] = 'Stream'
+ return True, centos_facts
+
+ if "TencentOS Server" in data:
+ centos_facts['distribution'] = 'TencentOS'
+ return True, centos_facts
+
+ return False, centos_facts
+
+
+class Distribution(object):
+ """
+ This class fills the distribution, distribution_version and distribution_release variables
+
+ To do so it checks the existence and content of typical files in /etc containing distribution information
+
+ This is unit tested. Please extend the tests to cover all distributions if you have them available.
+ """
+
+ # keep keys in sync with Conditionals page of docs
+ OS_FAMILY_MAP = {'RedHat': ['RedHat', 'RHEL', 'Fedora', 'CentOS', 'Scientific', 'SLC',
+ 'Ascendos', 'CloudLinux', 'PSBM', 'OracleLinux', 'OVS',
+ 'OEL', 'Amazon', 'Virtuozzo', 'XenServer', 'Alibaba',
+ 'EulerOS', 'openEuler', 'AlmaLinux', 'Rocky', 'TencentOS',
+ 'EuroLinux', 'Kylin Linux Advanced Server'],
+ 'Debian': ['Debian', 'Ubuntu', 'Raspbian', 'Neon', 'KDE neon',
+ 'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux',
+ 'Pop!_OS', 'Parrot', 'Pardus GNU/Linux', 'Uos', 'Deepin', 'OSMC'],
+ 'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed',
+ 'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap'],
+ 'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'],
+ 'Mandrake': ['Mandrake', 'Mandriva'],
+ 'Solaris': ['Solaris', 'Nexenta', 'OmniOS', 'OpenIndiana', 'SmartOS'],
+ 'Slackware': ['Slackware'],
+ 'Altlinux': ['Altlinux'],
+ 'SGML': ['SGML'],
+ 'Gentoo': ['Gentoo', 'Funtoo'],
+ 'Alpine': ['Alpine'],
+ 'AIX': ['AIX'],
+ 'HP-UX': ['HPUX'],
+ 'Darwin': ['MacOSX'],
+ 'FreeBSD': ['FreeBSD', 'TrueOS'],
+ 'ClearLinux': ['Clear Linux OS', 'Clear Linux Mix'],
+ 'DragonFly': ['DragonflyBSD', 'DragonFlyBSD', 'Gentoo/DragonflyBSD', 'Gentoo/DragonFlyBSD'],
+ 'NetBSD': ['NetBSD'], }
+
+ OS_FAMILY = {}
+ for family, names in OS_FAMILY_MAP.items():
+ for name in names:
+ OS_FAMILY[name] = family
+
+ def __init__(self, module):
+ self.module = module
+
+ def get_distribution_facts(self):
+ distribution_facts = {}
+
+ # The platform module provides information about the running
+ # system/distribution. Use this as a baseline and fix buggy systems
+ # afterwards
+ system = platform.system()
+ distribution_facts['distribution'] = system
+ distribution_facts['distribution_release'] = platform.release()
+ distribution_facts['distribution_version'] = platform.version()
+
+ systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'FreeBSD', 'OpenBSD', 'SunOS', 'DragonFly', 'NetBSD')
+
+ if system in systems_implemented:
+ cleanedname = system.replace('-', '')
+ distfunc = getattr(self, 'get_distribution_' + cleanedname)
+ dist_func_facts = distfunc()
+ distribution_facts.update(dist_func_facts)
+ elif system == 'Linux':
+
+ distribution_files = DistributionFiles(module=self.module)
+
+ # linux_distribution_facts = LinuxDistribution(module).get_distribution_facts()
+ dist_file_facts = distribution_files.process_dist_files()
+
+ distribution_facts.update(dist_file_facts)
+
+ distro = distribution_facts['distribution']
+
+ # look for an os family alias for the 'distribution'; if there isn't one, use 'distribution' itself
+ distribution_facts['os_family'] = self.OS_FAMILY.get(distro, None) or distro
+
+ return distribution_facts
+
+ def get_distribution_AIX(self):
+ aix_facts = {}
+ rc, out, err = self.module.run_command("/usr/bin/oslevel")
+ data = out.split('.')
+ aix_facts['distribution_major_version'] = data[0]
+ if len(data) > 1:
+ aix_facts['distribution_version'] = '%s.%s' % (data[0], data[1])
+ aix_facts['distribution_release'] = data[1]
+ else:
+ aix_facts['distribution_version'] = data[0]
+ return aix_facts
+
+ def get_distribution_HPUX(self):
+ hpux_facts = {}
+ rc, out, err = self.module.run_command(r"/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
+ data = re.search(r'HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
+ if data:
+ hpux_facts['distribution_version'] = data.groups()[0]
+ hpux_facts['distribution_release'] = data.groups()[1]
+ return hpux_facts
+
+ def get_distribution_Darwin(self):
+ darwin_facts = {}
+ darwin_facts['distribution'] = 'MacOSX'
+ rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion")
+ data = out.split()[-1]
+ if data:
+ darwin_facts['distribution_major_version'] = data.split('.')[0]
+ darwin_facts['distribution_version'] = data
+ return darwin_facts
+
+ def get_distribution_FreeBSD(self):
+ freebsd_facts = {}
+ freebsd_facts['distribution_release'] = platform.release()
+ data = re.search(r'(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT|RC|PRERELEASE).*', freebsd_facts['distribution_release'])
+ if 'trueos' in platform.version():
+ freebsd_facts['distribution'] = 'TrueOS'
+ if data:
+ freebsd_facts['distribution_major_version'] = data.group(1)
+ freebsd_facts['distribution_version'] = '%s.%s' % (data.group(1), data.group(2))
+ return freebsd_facts
+
+ def get_distribution_OpenBSD(self):
+ openbsd_facts = {}
+ openbsd_facts['distribution_version'] = platform.release()
+ rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version")
+ match = re.match(r'OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
+ if match:
+ openbsd_facts['distribution_release'] = match.groups()[0]
+ else:
+ openbsd_facts['distribution_release'] = 'release'
+ return openbsd_facts
+
+ def get_distribution_DragonFly(self):
+ dragonfly_facts = {
+ 'distribution_release': platform.release()
+ }
+ rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version")
+ match = re.search(r'v(\d+)\.(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT).*', out)
+ if match:
+ dragonfly_facts['distribution_major_version'] = match.group(1)
+ dragonfly_facts['distribution_version'] = '%s.%s.%s' % match.groups()[:3]
+ return dragonfly_facts
+
+ def get_distribution_NetBSD(self):
+ netbsd_facts = {}
+ platform_release = platform.release()
+ netbsd_facts['distribution_release'] = platform_release
+ rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version")
+ match = re.match(r'NetBSD\s(\d+)\.(\d+)\s\((GENERIC)\).*', out)
+ if match:
+ netbsd_facts['distribution_major_version'] = match.group(1)
+ netbsd_facts['distribution_version'] = '%s.%s' % match.groups()[:2]
+ else:
+ netbsd_facts['distribution_major_version'] = platform_release.split('.')[0]
+ netbsd_facts['distribution_version'] = platform_release
+ return netbsd_facts
+
+ def get_distribution_SMGL(self):
+ smgl_facts = {}
+ smgl_facts['distribution'] = 'Source Mage GNU/Linux'
+ return smgl_facts
+
+ def get_distribution_SunOS(self):
+ sunos_facts = {}
+
+ data = get_file_content('/etc/release').splitlines()[0]
+
+ if 'Solaris' in data:
+ # for solaris 10 uname_r will contain 5.10, for solaris 11 it will have 5.11
+ uname_r = get_uname(self.module, flags=['-r'])
+ ora_prefix = ''
+ if 'Oracle Solaris' in data:
+ data = data.replace('Oracle ', '')
+ ora_prefix = 'Oracle '
+ sunos_facts['distribution'] = data.split()[0]
+ sunos_facts['distribution_version'] = data.split()[1]
+ sunos_facts['distribution_release'] = ora_prefix + data
+ sunos_facts['distribution_major_version'] = uname_r.split('.')[1].rstrip()
+ return sunos_facts
+
+ uname_v = get_uname(self.module, flags=['-v'])
+ distribution_version = None
+
+ if 'SmartOS' in data:
+ sunos_facts['distribution'] = 'SmartOS'
+ if _file_exists('/etc/product'):
+ product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').splitlines() if ': ' in l])
+ if 'Image' in product_data:
+ distribution_version = product_data.get('Image').split()[-1]
+ elif 'OpenIndiana' in data:
+ sunos_facts['distribution'] = 'OpenIndiana'
+ elif 'OmniOS' in data:
+ sunos_facts['distribution'] = 'OmniOS'
+ distribution_version = data.split()[-1]
+ elif uname_v is not None and 'NexentaOS_' in uname_v:
+ sunos_facts['distribution'] = 'Nexenta'
+ distribution_version = data.split()[-1].lstrip('v')
+
+ if sunos_facts.get('distribution', '') in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
+ sunos_facts['distribution_release'] = data.strip()
+ if distribution_version is not None:
+ sunos_facts['distribution_version'] = distribution_version
+ elif uname_v is not None:
+ sunos_facts['distribution_version'] = uname_v.splitlines()[0].strip()
+ return sunos_facts
+
+ return sunos_facts
+
+
+class DistributionFactCollector(BaseFactCollector):
+ name = 'distribution'
+ _fact_ids = set(['distribution_version',
+ 'distribution_release',
+ 'distribution_major_version',
+ 'os_family']) # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+ facts_dict = {}
+ if not module:
+ return facts_dict
+
+ distribution = Distribution(module=module)
+ distro_facts = distribution.get_distribution_facts()
+
+ return distro_facts
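
The OS_FAMILY_MAP above is inverted at class-definition time into a flat name-to-family lookup, so resolving os_family is a single dict access with the distribution name itself as the fallback. A minimal standalone sketch of that idea (abbreviated map, not Ansible's full table):

    # abbreviated map; the real table covers many more names
    OS_FAMILY_MAP = {'RedHat': ['RedHat', 'CentOS', 'Fedora'],
                     'Debian': ['Debian', 'Ubuntu']}
    # invert: one entry per distribution name
    OS_FAMILY = {name: family
                 for family, names in OS_FAMILY_MAP.items()
                 for name in names}
    distro = 'Ubuntu'
    # unmapped names fall back to the distribution name itself
    print(OS_FAMILY.get(distro, distro))  # -> Debian
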
diff --git a/lib/ansible/module_utils/facts/system/dns.py b/lib/ansible/module_utils/facts/system/dns.py
new file mode 100644
index 0000000..d913f4a
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/dns.py
@@ -0,0 +1,68 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.utils import get_file_content
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class DnsFactCollector(BaseFactCollector):
+ name = 'dns'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ dns_facts = {}
+
+ # TODO: flatten
+ dns_facts['dns'] = {}
+
+ for line in get_file_content('/etc/resolv.conf', '').splitlines():
+ if line.startswith('#') or line.startswith(';') or line.strip() == '':
+ continue
+ tokens = line.split()
+ if len(tokens) == 0:
+ continue
+ if tokens[0] == 'nameserver':
+ if 'nameservers' not in dns_facts['dns']:
+ dns_facts['dns']['nameservers'] = []
+ for nameserver in tokens[1:]:
+ dns_facts['dns']['nameservers'].append(nameserver)
+ elif tokens[0] == 'domain':
+ if len(tokens) > 1:
+ dns_facts['dns']['domain'] = tokens[1]
+ elif tokens[0] == 'search':
+ dns_facts['dns']['search'] = []
+ for suffix in tokens[1:]:
+ dns_facts['dns']['search'].append(suffix)
+ elif tokens[0] == 'sortlist':
+ dns_facts['dns']['sortlist'] = []
+ for address in tokens[1:]:
+ dns_facts['dns']['sortlist'].append(address)
+ elif tokens[0] == 'options':
+ dns_facts['dns']['options'] = {}
+ if len(tokens) > 1:
+ for option in tokens[1:]:
+ option_tokens = option.split(':', 1)
+ if len(option_tokens) == 0:
+ continue
+ val = option_tokens[1] if len(option_tokens) == 2 else True
+ dns_facts['dns']['options'][option_tokens[0]] = val
+
+ return dns_facts
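
For reference, a hypothetical resolv.conf and the fact shape the loop above produces from it (values are made up):

    # hypothetical /etc/resolv.conf:
    #   nameserver 192.0.2.1
    #   nameserver 192.0.2.2
    #   search example.com corp.example.com
    #   options timeout:2 rotate
    #
    # shape of the returned fact:
    # {'dns': {'nameservers': ['192.0.2.1', '192.0.2.2'],
    #          'search': ['example.com', 'corp.example.com'],
    #          'options': {'timeout': '2', 'rotate': True}}}
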
diff --git a/lib/ansible/module_utils/facts/system/env.py b/lib/ansible/module_utils/facts/system/env.py
new file mode 100644
index 0000000..605443f
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/env.py
@@ -0,0 +1,39 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.six import iteritems
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class EnvFactCollector(BaseFactCollector):
+ name = 'env'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ env_facts = {}
+ env_facts['env'] = {}
+
+ for k, v in iteritems(os.environ):
+ env_facts['env'][k] = v
+
+ return env_facts
diff --git a/lib/ansible/module_utils/facts/system/fips.py b/lib/ansible/module_utils/facts/system/fips.py
new file mode 100644
index 0000000..7e56610
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/fips.py
@@ -0,0 +1,39 @@
+# Determine if a system is in 'fips' mode
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.utils import get_file_content
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class FipsFactCollector(BaseFactCollector):
+ name = 'fips'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ # NOTE: this is populated even if it is not set
+ fips_facts = {}
+ fips_facts['fips'] = False
+ data = get_file_content('/proc/sys/crypto/fips_enabled')
+ if data == '1':
+ fips_facts['fips'] = True
+ return fips_facts
diff --git a/lib/ansible/module_utils/facts/system/loadavg.py b/lib/ansible/module_utils/facts/system/loadavg.py
new file mode 100644
index 0000000..8475f2a
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/loadavg.py
@@ -0,0 +1,31 @@
+# (c) 2021 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class LoadAvgFactCollector(BaseFactCollector):
+ name = 'loadavg'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ facts = {}
+ try:
+ # (0.58, 0.82, 0.98)
+ loadavg = os.getloadavg()
+ facts['loadavg'] = {
+ '1m': loadavg[0],
+ '5m': loadavg[1],
+ '15m': loadavg[2]
+ }
+ except OSError:
+ pass
+
+ return facts
diff --git a/lib/ansible/module_utils/facts/system/local.py b/lib/ansible/module_utils/facts/system/local.py
new file mode 100644
index 0000000..bacdbe0
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/local.py
@@ -0,0 +1,113 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import glob
+import json
+import os
+import stat
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.facts.collector import BaseFactCollector
+from ansible.module_utils.six.moves import configparser, StringIO
+
+
+class LocalFactCollector(BaseFactCollector):
+ name = 'local'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ local_facts = {}
+ local_facts['local'] = {}
+
+ if not module:
+ return local_facts
+
+ fact_path = module.params.get('fact_path', None)
+
+ if not fact_path or not os.path.exists(fact_path):
+ return local_facts
+
+ local = {}
+ # go over .fact files: run the executable ones, read the rest; on failure, warn and record the error message as the fact value
+ for fn in sorted(glob.glob(fact_path + '/*.fact')):
+ # use filename for key where it will sit under local facts
+ fact_base = os.path.basename(fn).replace('.fact', '')
+ failed = None
+ try:
+ executable_fact = stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]
+ except OSError as e:
+ failed = 'Could not stat fact (%s): %s' % (fn, to_text(e))
+ local[fact_base] = failed
+ module.warn(failed)
+ continue
+ if executable_fact:
+ try:
+ # run it
+ rc, out, err = module.run_command(fn)
+ if rc != 0:
+ failed = 'Failure executing fact script (%s), rc: %s, err: %s' % (fn, rc, err)
+ except (IOError, OSError) as e:
+ failed = 'Could not execute fact script (%s): %s' % (fn, to_text(e))
+
+ if failed is not None:
+ local[fact_base] = failed
+ module.warn(failed)
+ continue
+ else:
+ # ignores exceptions and returns empty
+ out = get_file_content(fn, default='')
+
+ try:
+ # ensure we have unicode
+ out = to_text(out, errors='surrogate_or_strict')
+ except UnicodeError:
+ fact = 'error loading fact - output of running "%s" was not utf-8' % fn
+ local[fact_base] = fact
+ module.warn(fact)
+ continue
+
+ # try to read it as json first
+ try:
+ fact = json.loads(out)
+ except ValueError:
+ # if that fails read it with ConfigParser
+ cp = configparser.ConfigParser()
+ try:
+ cp.readfp(StringIO(out))
+ except configparser.Error:
+ fact = "error loading facts as JSON or ini - please check content: %s" % fn
+ module.warn(fact)
+ else:
+ fact = {}
+ for sect in cp.sections():
+ if sect not in fact:
+ fact[sect] = {}
+ for opt in cp.options(sect):
+ val = cp.get(sect, opt)
+ fact[sect][opt] = val
+ except Exception as e:
+ fact = "Failed to convert (%s) to JSON: %s" % (fn, to_text(e))
+ module.warn(fact)
+
+ local[fact_base] = fact
+
+ local_facts['local'] = local
+ return local_facts
diff --git a/lib/ansible/module_utils/facts/system/lsb.py b/lib/ansible/module_utils/facts/system/lsb.py
new file mode 100644
index 0000000..2dc1433
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/lsb.py
@@ -0,0 +1,108 @@
+# Collect facts related to LSB (Linux Standard Base)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.utils import get_file_lines
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class LSBFactCollector(BaseFactCollector):
+ name = 'lsb'
+ _fact_ids = set() # type: t.Set[str]
+ STRIP_QUOTES = r'\'\"\\'
+
+ def _lsb_release_bin(self, lsb_path, module):
+ lsb_facts = {}
+
+ if not lsb_path:
+ return lsb_facts
+
+ rc, out, err = module.run_command([lsb_path, "-a"], errors='surrogate_then_replace')
+ if rc != 0:
+ return lsb_facts
+
+ for line in out.splitlines():
+ if len(line) < 1 or ':' not in line:
+ continue
+ value = line.split(':', 1)[1].strip()
+
+ if 'LSB Version:' in line:
+ lsb_facts['release'] = value
+ elif 'Distributor ID:' in line:
+ lsb_facts['id'] = value
+ elif 'Description:' in line:
+ lsb_facts['description'] = value
+ elif 'Release:' in line:
+ lsb_facts['release'] = value
+ elif 'Codename:' in line:
+ lsb_facts['codename'] = value
+
+ return lsb_facts
+
+ def _lsb_release_file(self, etc_lsb_release_location):
+ lsb_facts = {}
+
+ if not os.path.exists(etc_lsb_release_location):
+ return lsb_facts
+
+ for line in get_file_lines(etc_lsb_release_location):
+ value = line.split('=', 1)[1].strip()
+
+ if 'DISTRIB_ID' in line:
+ lsb_facts['id'] = value
+ elif 'DISTRIB_RELEASE' in line:
+ lsb_facts['release'] = value
+ elif 'DISTRIB_DESCRIPTION' in line:
+ lsb_facts['description'] = value
+ elif 'DISTRIB_CODENAME' in line:
+ lsb_facts['codename'] = value
+
+ return lsb_facts
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+ lsb_facts = {}
+
+ if not module:
+ return facts_dict
+
+ lsb_path = module.get_bin_path('lsb_release')
+
+ # try the 'lsb_release' script first
+ if lsb_path:
+ lsb_facts = self._lsb_release_bin(lsb_path,
+ module=module)
+
+ # no lsb_release, try looking in /etc/lsb-release
+ if not lsb_facts:
+ lsb_facts = self._lsb_release_file('/etc/lsb-release')
+
+ if lsb_facts and 'release' in lsb_facts:
+ lsb_facts['major_release'] = lsb_facts['release'].split('.')[0]
+
+ for k, v in lsb_facts.items():
+ if v:
+ lsb_facts[k] = v.strip(LSBFactCollector.STRIP_QUOTES)
+
+ facts_dict['lsb'] = lsb_facts
+ return facts_dict
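
For reference, hypothetical `lsb_release -a` output and the facts the parsing loop above extracts from it:

    # hypothetical `lsb_release -a` output:
    #   Distributor ID: Ubuntu
    #   Description:    Ubuntu 20.04.4 LTS
    #   Release:        20.04
    #   Codename:       focal
    #
    # parsed facts (after quote stripping and major_release derivation):
    # {'id': 'Ubuntu', 'description': 'Ubuntu 20.04.4 LTS',
    #  'release': '20.04', 'codename': 'focal', 'major_release': '20'}
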
diff --git a/lib/ansible/module_utils/facts/system/pkg_mgr.py b/lib/ansible/module_utils/facts/system/pkg_mgr.py
new file mode 100644
index 0000000..704ea20
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/pkg_mgr.py
@@ -0,0 +1,165 @@
+# Collect facts related to the system package manager
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import subprocess
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+# A list of dicts. If there is a platform with more than one
+# package manager, put the preferred one last. If there is an
+# ansible module, use that as the value for the 'name' key.
+PKG_MGRS = [{'path': '/usr/bin/rpm-ostree', 'name': 'atomic_container'},
+ {'path': '/usr/bin/yum', 'name': 'yum'},
+ {'path': '/usr/bin/dnf', 'name': 'dnf'},
+ {'path': '/usr/bin/apt-get', 'name': 'apt'},
+ {'path': '/usr/bin/zypper', 'name': 'zypper'},
+ {'path': '/usr/sbin/urpmi', 'name': 'urpmi'},
+ {'path': '/usr/bin/pacman', 'name': 'pacman'},
+ {'path': '/bin/opkg', 'name': 'opkg'},
+ {'path': '/usr/pkg/bin/pkgin', 'name': 'pkgin'},
+ {'path': '/opt/local/bin/pkgin', 'name': 'pkgin'},
+ {'path': '/opt/tools/bin/pkgin', 'name': 'pkgin'},
+ {'path': '/opt/local/bin/port', 'name': 'macports'},
+ {'path': '/usr/local/bin/brew', 'name': 'homebrew'},
+ {'path': '/opt/homebrew/bin/brew', 'name': 'homebrew'},
+ {'path': '/sbin/apk', 'name': 'apk'},
+ {'path': '/usr/sbin/pkg', 'name': 'pkgng'},
+ {'path': '/usr/sbin/swlist', 'name': 'swdepot'},
+ {'path': '/usr/bin/emerge', 'name': 'portage'},
+ {'path': '/usr/sbin/pkgadd', 'name': 'svr4pkg'},
+ {'path': '/usr/bin/pkg', 'name': 'pkg5'},
+ {'path': '/usr/bin/xbps-install', 'name': 'xbps'},
+ {'path': '/usr/local/sbin/pkg', 'name': 'pkgng'},
+ {'path': '/usr/bin/swupd', 'name': 'swupd'},
+ {'path': '/usr/sbin/sorcery', 'name': 'sorcery'},
+ {'path': '/usr/bin/installp', 'name': 'installp'},
+ {'path': '/QOpenSys/pkgs/bin/yum', 'name': 'yum'},
+ ]
+
+
+class OpenBSDPkgMgrFactCollector(BaseFactCollector):
+ name = 'pkg_mgr'
+ _fact_ids = set() # type: t.Set[str]
+ _platform = 'OpenBSD'
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+
+ facts_dict['pkg_mgr'] = 'openbsd_pkg'
+ return facts_dict
+
+
+# the fact ends up being 'pkg_mgr' so stick with that naming/spelling
+class PkgMgrFactCollector(BaseFactCollector):
+ name = 'pkg_mgr'
+ _fact_ids = set() # type: t.Set[str]
+ _platform = 'Generic'
+ required_facts = set(['distribution'])
+
+ def _pkg_mgr_exists(self, pkg_mgr_name):
+ for cur_pkg_mgr in [pkg_mgr for pkg_mgr in PKG_MGRS if pkg_mgr['name'] == pkg_mgr_name]:
+ if os.path.exists(cur_pkg_mgr['path']):
+ return pkg_mgr_name
+
+ def _check_rh_versions(self, pkg_mgr_name, collected_facts):
+ if os.path.exists('/run/ostree-booted'):
+ return "atomic_container"
+
+ if collected_facts['ansible_distribution'] == 'Fedora':
+ try:
+ if int(collected_facts['ansible_distribution_major_version']) < 23:
+ if self._pkg_mgr_exists('yum'):
+ pkg_mgr_name = 'yum'
+
+ else:
+ if self._pkg_mgr_exists('dnf'):
+ pkg_mgr_name = 'dnf'
+ except ValueError:
+ # If there's some new magical Fedora version in the future,
+ # just default to dnf
+ pkg_mgr_name = 'dnf'
+ elif collected_facts['ansible_distribution'] == 'Amazon':
+ try:
+ if int(collected_facts['ansible_distribution_major_version']) < 2022:
+ if self._pkg_mgr_exists('yum'):
+ pkg_mgr_name = 'yum'
+ else:
+ if self._pkg_mgr_exists('dnf'):
+ pkg_mgr_name = 'dnf'
+ except ValueError:
+ pkg_mgr_name = 'dnf'
+ else:
+ # If it's not one of the above and it's Red Hat family of distros, assume
+ # RHEL or a clone. For versions of RHEL < 8 that Ansible supports, the
+ # vendor supported official package manager is 'yum' and in RHEL 8+
+ # (as far as we know at the time of this writing) it is 'dnf'.
+ # If anyone wants to force a non-official package manager then they
+ # can define a provider to either the package or yum action plugins.
+ if int(collected_facts['ansible_distribution_major_version']) < 8:
+ pkg_mgr_name = 'yum'
+ else:
+ pkg_mgr_name = 'dnf'
+ return pkg_mgr_name
+
+ def _check_apt_flavor(self, pkg_mgr_name):
+ # Check if '/usr/bin/apt-get' is APT-RPM or an ordinary (dpkg-based) APT.
+ # There's an rpm package on Debian, so checking if /usr/bin/rpm exists
+ # is not enough. Instead, ask RPM whether /usr/bin/apt-get belongs to some
+ # RPM package.
+ rpm_query = '/usr/bin/rpm -q --whatprovides /usr/bin/apt-get'.split()
+ if os.path.exists('/usr/bin/rpm'):
+ with open(os.devnull, 'w') as null:
+ try:
+ subprocess.check_call(rpm_query, stdout=null, stderr=null)
+ pkg_mgr_name = 'apt_rpm'
+ except subprocess.CalledProcessError:
+ # No apt-get in RPM database. Looks like Debian/Ubuntu
+ # with rpm package installed
+ pkg_mgr_name = 'apt'
+ return pkg_mgr_name
+
+ def pkg_mgrs(self, collected_facts):
+ # Filter out /usr/bin/pkg because on Altlinux it is actually
+ # perl-Package (not the Solaris package manager).
+ # Since pkg5 takes precedence over apt, this workaround
+ # is required to select the suitable package manager on Altlinux.
+ if collected_facts['ansible_os_family'] == 'Altlinux':
+ return filter(lambda pkg: pkg['path'] != '/usr/bin/pkg', PKG_MGRS)
+ else:
+ return PKG_MGRS
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+ collected_facts = collected_facts or {}
+
+ pkg_mgr_name = 'unknown'
+ for pkg in self.pkg_mgrs(collected_facts):
+ if os.path.exists(pkg['path']):
+ pkg_mgr_name = pkg['name']
+
+ # Handle distro family defaults when more than one package manager is
+ # installed or available to the distro, the ansible_fact entry should be
+ # the default package manager officially supported by the distro.
+ if collected_facts['ansible_os_family'] == "RedHat":
+ pkg_mgr_name = self._check_rh_versions(pkg_mgr_name, collected_facts)
+ elif collected_facts['ansible_os_family'] == 'Debian' and pkg_mgr_name != 'apt':
+ # It's possible to install yum, dnf, zypper, rpm, etc inside of
+ # Debian. Doing so does not mean the system wants to use them.
+ pkg_mgr_name = 'apt'
+ elif collected_facts['ansible_os_family'] == 'Altlinux':
+ if pkg_mgr_name == 'apt':
+ pkg_mgr_name = 'apt_rpm'
+
+ # Check if /usr/bin/apt-get is ordinary (dpkg-based) APT or APT-RPM
+ if pkg_mgr_name == 'apt':
+ pkg_mgr_name = self._check_apt_flavor(pkg_mgr_name)
+
+ facts_dict['pkg_mgr'] = pkg_mgr_name
+ return facts_dict
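
Because the loop above keeps overwriting pkg_mgr_name for every path that exists, the last matching entry in PKG_MGRS wins, which is why the list comment says to put the preferred manager last. A minimal sketch:

    import os

    # abbreviated list; yum comes before dnf, i.e. the preferred one last
    PKG_MGRS = [{'path': '/usr/bin/yum', 'name': 'yum'},
                {'path': '/usr/bin/dnf', 'name': 'dnf'}]

    pkg_mgr_name = 'unknown'
    for pkg in PKG_MGRS:
        if os.path.exists(pkg['path']):
            pkg_mgr_name = pkg['name']  # later matches overwrite earlier ones
    print(pkg_mgr_name)  # 'dnf' on a host that ships both binaries
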
diff --git a/lib/ansible/module_utils/facts/system/platform.py b/lib/ansible/module_utils/facts/system/platform.py
new file mode 100644
index 0000000..b947801
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/platform.py
@@ -0,0 +1,99 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import socket
+import platform
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.utils import get_file_content
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+# i86pc is a Solaris and derivatives-ism
+SOLARIS_I86_RE_PATTERN = r'i([3456]86|86pc)'
+solaris_i86_re = re.compile(SOLARIS_I86_RE_PATTERN)
+
+
+class PlatformFactCollector(BaseFactCollector):
+ name = 'platform'
+ _fact_ids = set(['system',
+ 'kernel',
+ 'kernel_version',
+ 'machine',
+ 'python_version',
+ 'architecture',
+ 'machine_id']) # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ platform_facts = {}
+ # platform.system() can be Linux, Darwin, Java, or Windows
+ platform_facts['system'] = platform.system()
+ platform_facts['kernel'] = platform.release()
+ platform_facts['kernel_version'] = platform.version()
+ platform_facts['machine'] = platform.machine()
+
+ platform_facts['python_version'] = platform.python_version()
+
+ platform_facts['fqdn'] = socket.getfqdn()
+ platform_facts['hostname'] = platform.node().split('.')[0]
+ platform_facts['nodename'] = platform.node()
+
+ platform_facts['domain'] = '.'.join(platform_facts['fqdn'].split('.')[1:])
+
+ arch_bits = platform.architecture()[0]
+
+ platform_facts['userspace_bits'] = arch_bits.replace('bit', '')
+ if platform_facts['machine'] == 'x86_64':
+ platform_facts['architecture'] = platform_facts['machine']
+ if platform_facts['userspace_bits'] == '64':
+ platform_facts['userspace_architecture'] = 'x86_64'
+ elif platform_facts['userspace_bits'] == '32':
+ platform_facts['userspace_architecture'] = 'i386'
+ elif solaris_i86_re.search(platform_facts['machine']):
+ platform_facts['architecture'] = 'i386'
+ if platform_facts['userspace_bits'] == '64':
+ platform_facts['userspace_architecture'] = 'x86_64'
+ elif platform_facts['userspace_bits'] == '32':
+ platform_facts['userspace_architecture'] = 'i386'
+ else:
+ platform_facts['architecture'] = platform_facts['machine']
+
+ if platform_facts['system'] == 'AIX':
+ # Attempt to use getconf to figure out architecture
+ # fall back to bootinfo if needed
+ getconf_bin = module.get_bin_path('getconf')
+ if getconf_bin:
+ rc, out, err = module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE'])
+ data = out.splitlines()
+ platform_facts['architecture'] = data[0]
+ else:
+ bootinfo_bin = module.get_bin_path('bootinfo')
+ rc, out, err = module.run_command([bootinfo_bin, '-p'])
+ data = out.splitlines()
+ platform_facts['architecture'] = data[0]
+ elif platform_facts['system'] == 'OpenBSD':
+ platform_facts['architecture'] = platform.uname()[5]
+
+ machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
+ if machine_id:
+ machine_id = machine_id.splitlines()[0]
+ platform_facts["machine_id"] = machine_id
+
+ return platform_facts
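
The fqdn/hostname/domain trio above is derived purely from stdlib calls; a condensed sketch with a hypothetical host name:

    import platform
    import socket

    fqdn = socket.getfqdn()                   # e.g. 'web1.example.com' (hypothetical)
    hostname = platform.node().split('.')[0]  # 'web1'
    domain = '.'.join(fqdn.split('.')[1:])    # 'example.com'
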
diff --git a/lib/ansible/module_utils/facts/system/python.py b/lib/ansible/module_utils/facts/system/python.py
new file mode 100644
index 0000000..50b66dd
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/python.py
@@ -0,0 +1,62 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+try:
+ # Check if we have SSLContext support
+ from ssl import create_default_context, SSLContext
+ del create_default_context
+ del SSLContext
+ HAS_SSLCONTEXT = True
+except ImportError:
+ HAS_SSLCONTEXT = False
+
+
+class PythonFactCollector(BaseFactCollector):
+ name = 'python'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ python_facts = {}
+ python_facts['python'] = {
+ 'version': {
+ 'major': sys.version_info[0],
+ 'minor': sys.version_info[1],
+ 'micro': sys.version_info[2],
+ 'releaselevel': sys.version_info[3],
+ 'serial': sys.version_info[4]
+ },
+ 'version_info': list(sys.version_info),
+ 'executable': sys.executable,
+ 'has_sslcontext': HAS_SSLCONTEXT
+ }
+
+ try:
+ python_facts['python']['type'] = sys.subversion[0]
+ except AttributeError:
+ try:
+ python_facts['python']['type'] = sys.implementation.name
+ except AttributeError:
+ python_facts['python']['type'] = None
+
+ return python_facts
diff --git a/lib/ansible/module_utils/facts/system/selinux.py b/lib/ansible/module_utils/facts/system/selinux.py
new file mode 100644
index 0000000..5c6b012
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/selinux.py
@@ -0,0 +1,93 @@
+# Collect facts related to selinux
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+try:
+ from ansible.module_utils.compat import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ HAVE_SELINUX = False
+
+SELINUX_MODE_DICT = {
+ 1: 'enforcing',
+ 0: 'permissive',
+ -1: 'disabled'
+}
+
+
+class SelinuxFactCollector(BaseFactCollector):
+ name = 'selinux'
+ _fact_ids = set() # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+ selinux_facts = {}
+
+ # If selinux library is missing, only set the status and selinux_python_present since
+ # there is no way to tell if SELinux is enabled or disabled on the system
+ # without the library.
+ if not HAVE_SELINUX:
+ selinux_facts['status'] = 'Missing selinux Python library'
+ facts_dict['selinux'] = selinux_facts
+ facts_dict['selinux_python_present'] = False
+ return facts_dict
+
+ # Set a boolean for testing whether the Python library is present
+ facts_dict['selinux_python_present'] = True
+
+ if not selinux.is_selinux_enabled():
+ selinux_facts['status'] = 'disabled'
+ else:
+ selinux_facts['status'] = 'enabled'
+
+ try:
+ selinux_facts['policyvers'] = selinux.security_policyvers()
+ except (AttributeError, OSError):
+ selinux_facts['policyvers'] = 'unknown'
+
+ try:
+ (rc, configmode) = selinux.selinux_getenforcemode()
+ if rc == 0:
+ selinux_facts['config_mode'] = SELINUX_MODE_DICT.get(configmode, 'unknown')
+ else:
+ selinux_facts['config_mode'] = 'unknown'
+ except (AttributeError, OSError):
+ selinux_facts['config_mode'] = 'unknown'
+
+ try:
+ mode = selinux.security_getenforce()
+ selinux_facts['mode'] = SELINUX_MODE_DICT.get(mode, 'unknown')
+ except (AttributeError, OSError):
+ selinux_facts['mode'] = 'unknown'
+
+ try:
+ (rc, policytype) = selinux.selinux_getpolicytype()
+ if rc == 0:
+ selinux_facts['type'] = policytype
+ else:
+ selinux_facts['type'] = 'unknown'
+ except (AttributeError, OSError):
+ selinux_facts['type'] = 'unknown'
+
+ facts_dict['selinux'] = selinux_facts
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/system/service_mgr.py b/lib/ansible/module_utils/facts/system/service_mgr.py
new file mode 100644
index 0000000..d862ac9
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/service_mgr.py
@@ -0,0 +1,152 @@
+# Collect facts related to system service manager and init.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import platform
+import re
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils._text import to_native
+
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+# The distutils module is not shipped with SUNWPython on Solaris.
+# It's in the SUNWPython-devel package which also contains development files
+# that don't belong on production boxes. Since our Solaris code doesn't
+# depend on LooseVersion, do not import it on Solaris.
+if platform.system() != 'SunOS':
+ from ansible.module_utils.compat.version import LooseVersion
+
+
+class ServiceMgrFactCollector(BaseFactCollector):
+ name = 'service_mgr'
+ _fact_ids = set() # type: t.Set[str]
+ required_facts = set(['platform', 'distribution'])
+
+ @staticmethod
+ def is_systemd_managed(module):
+ # tools must be installed
+ if module.get_bin_path('systemctl'):
+
+ # this should show if systemd is the boot init system, even if checking init failed to mark it as systemd
+ # these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
+ for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
+ if os.path.exists(canary):
+ return True
+ return False
+
+ @staticmethod
+ def is_systemd_managed_offline(module):
+ # tools must be installed
+ if module.get_bin_path('systemctl'):
+ # check if /sbin/init is a symlink to systemd
+ # on SUSE, /sbin/init may be missing if systemd-sysvinit package is not installed.
+ if os.path.islink('/sbin/init') and os.path.basename(os.readlink('/sbin/init')) == 'systemd':
+ return True
+ return False
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+
+ if not module:
+ return facts_dict
+
+ collected_facts = collected_facts or {}
+ service_mgr_name = None
+
+ # TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, etc
+ # also, OSs other than Linux might need to check across several possible candidates
+
+ # Mapping of proc_1 values to more useful names
+ proc_1_map = {
+ 'procd': 'openwrt_init',
+ 'runit-init': 'runit',
+ 'svscan': 'svc',
+ 'openrc-init': 'openrc',
+ }
+
+ # try various forms of querying pid 1
+ proc_1 = get_file_content('/proc/1/comm')
+ if proc_1 is None:
+ rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
+
+ # if command fails, or stdout is empty string or the output of the command starts with what looks like a PID,
+ # then the 'ps' command probably didn't work the way we wanted, probably because it's busybox
+ if rc != 0 or not proc_1.strip() or re.match(r' *[0-9]+ ', proc_1):
+ proc_1 = None
+
+ # The ps command above may return "COMMAND" if the user cannot read /proc, e.g. with grsecurity
+ if proc_1 == "COMMAND\n":
+ proc_1 = None
+
+ if proc_1 is None and os.path.islink('/sbin/init'):
+ proc_1 = os.readlink('/sbin/init')
+
+ if proc_1 is not None:
+ proc_1 = os.path.basename(proc_1)
+ proc_1 = to_native(proc_1)
+ proc_1 = proc_1.strip()
+
+ if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
+ # many systems return init, so this cannot be trusted; if it ends in 'sh' it is probably a shell in a container
+ proc_1 = None
+
+ # if not init/None it should be an identifiable or custom init, so we are done!
+ if proc_1 is not None:
+ # Lookup proc_1 value in map and use proc_1 value itself if no match
+ service_mgr_name = proc_1_map.get(proc_1, proc_1)
+
+ # start with the easy ones
+ elif collected_facts.get('ansible_distribution', None) == 'MacOSX':
+ # FIXME: find way to query executable, version matching is not ideal
+ if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
+ service_mgr_name = 'launchd'
+ else:
+ service_mgr_name = 'systemstarter'
+ elif 'BSD' in collected_facts.get('ansible_system', '') or collected_facts.get('ansible_system') in ['Bitrig', 'DragonFly']:
+ # FIXME: we might want to break out to individual BSDs or 'rc'
+ service_mgr_name = 'bsdinit'
+ elif collected_facts.get('ansible_system') == 'AIX':
+ service_mgr_name = 'src'
+ elif collected_facts.get('ansible_system') == 'SunOS':
+ service_mgr_name = 'smf'
+ elif collected_facts.get('ansible_distribution') == 'OpenWrt':
+ service_mgr_name = 'openwrt_init'
+ elif collected_facts.get('ansible_system') == 'Linux':
+ # FIXME: mv is_systemd_managed
+ if self.is_systemd_managed(module=module):
+ service_mgr_name = 'systemd'
+ elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
+ service_mgr_name = 'upstart'
+ elif os.path.exists('/sbin/openrc'):
+ service_mgr_name = 'openrc'
+ elif self.is_systemd_managed_offline(module=module):
+ service_mgr_name = 'systemd'
+ elif os.path.exists('/etc/init.d/'):
+ service_mgr_name = 'sysvinit'
+
+ if not service_mgr_name:
+ # if we cannot detect, fallback to generic 'service'
+ service_mgr_name = 'service'
+
+ facts_dict['service_mgr'] = service_mgr_name
+ return facts_dict
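
A sketch of the pid-1 normalization and proc_1_map lookup above, as a standalone function (the real code then falls through to the per-distro heuristics when this yields None):

    import os

    proc_1_map = {'procd': 'openwrt_init', 'runit-init': 'runit',
                  'svscan': 'svc', 'openrc-init': 'openrc'}

    def identify_pid1(proc_1):
        # mirror the cleanup above: basename, strip, distrust 'init' and '*sh'
        proc_1 = os.path.basename(proc_1).strip()
        if proc_1 == 'init' or proc_1.endswith('sh'):
            return None  # fall through to the distro/platform heuristics
        return proc_1_map.get(proc_1, proc_1)

    print(identify_pid1('/sbin/runit-init\n'))  # -> runit
    print(identify_pid1('bash'))                # -> None (likely a container shell)
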
diff --git a/lib/ansible/module_utils/facts/system/ssh_pub_keys.py b/lib/ansible/module_utils/facts/system/ssh_pub_keys.py
new file mode 100644
index 0000000..85691c7
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/ssh_pub_keys.py
@@ -0,0 +1,56 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.utils import get_file_content
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class SshPubKeyFactCollector(BaseFactCollector):
+ name = 'ssh_pub_keys'
+ _fact_ids = set(['ssh_host_pub_keys',
+ 'ssh_host_key_dsa_public',
+ 'ssh_host_key_rsa_public',
+ 'ssh_host_key_ecdsa_public',
+ 'ssh_host_key_ed25519_public']) # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ ssh_pub_key_facts = {}
+ algos = ('dsa', 'rsa', 'ecdsa', 'ed25519')
+
+ # list of directories to check for ssh keys
+ # used in the order listed here, the first one with keys is used
+ keydirs = ['/etc/ssh', '/etc/openssh', '/etc']
+
+ for keydir in keydirs:
+ for algo in algos:
+ factname = 'ssh_host_key_%s_public' % algo
+ if factname in ssh_pub_key_facts:
+ # a previous keydir was already successful, stop looking
+ # for keys
+ return ssh_pub_key_facts
+ key_filename = '%s/ssh_host_%s_key.pub' % (keydir, algo)
+ keydata = get_file_content(key_filename)
+ if keydata is not None:
+ (keytype, key) = keydata.split()[0:2]
+ ssh_pub_key_facts[factname] = key
+ ssh_pub_key_facts[factname + '_keytype'] = keytype
+
+ return ssh_pub_key_facts
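
Each ssh_host_*_key.pub file holds 'keytype base64-key [comment]'; the split()[0:2] above keeps only the first two fields. Illustration with a truncated, made-up key:

    # hypothetical key file content
    keydata = 'ssh-ed25519 AAAAC3NzaC1lZDI1...TRUNCATED root@host1'
    (keytype, key) = keydata.split()[0:2]
    # -> ssh_host_key_ed25519_public = 'AAAAC3NzaC1lZDI1...TRUNCATED'
    #    ssh_host_key_ed25519_public_keytype = 'ssh-ed25519'
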
diff --git a/lib/ansible/module_utils/facts/system/user.py b/lib/ansible/module_utils/facts/system/user.py
new file mode 100644
index 0000000..2efa993
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/user.py
@@ -0,0 +1,55 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import getpass
+import os
+import pwd
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class UserFactCollector(BaseFactCollector):
+ name = 'user'
+ _fact_ids = set(['user_id', 'user_uid', 'user_gid',
+ 'user_gecos', 'user_dir', 'user_shell',
+ 'real_user_id', 'effective_user_id',
+ 'effective_group_ids']) # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ user_facts = {}
+
+ user_facts['user_id'] = getpass.getuser()
+
+ try:
+ pwent = pwd.getpwnam(getpass.getuser())
+ except KeyError:
+ pwent = pwd.getpwuid(os.getuid())
+
+ user_facts['user_uid'] = pwent.pw_uid
+ user_facts['user_gid'] = pwent.pw_gid
+ user_facts['user_gecos'] = pwent.pw_gecos
+ user_facts['user_dir'] = pwent.pw_dir
+ user_facts['user_shell'] = pwent.pw_shell
+ user_facts['real_user_id'] = os.getuid()
+ user_facts['effective_user_id'] = os.geteuid()
+ user_facts['real_group_id'] = os.getgid()
+ user_facts['effective_group_id'] = os.getegid()
+
+ return user_facts
diff --git a/lib/ansible/module_utils/facts/timeout.py b/lib/ansible/module_utils/facts/timeout.py
new file mode 100644
index 0000000..ebb71cc
--- /dev/null
+++ b/lib/ansible/module_utils/facts/timeout.py
@@ -0,0 +1,70 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import multiprocessing
+import multiprocessing.pool as mp
+
+# timeout function to make sure some fact gathering
+# steps do not exceed a time limit
+
+GATHER_TIMEOUT = None
+DEFAULT_GATHER_TIMEOUT = 10
+
+
+class TimeoutError(Exception):
+ pass
+
+
+def timeout(seconds=None, error_message="Timer expired"):
+ """
+ Timeout decorator to expire after a set number of seconds. This raises an
+ ansible.module_utils.facts.timeout.TimeoutError if the timeout is hit before the
+ function completes.
+ """
+ def decorator(func):
+ def wrapper(*args, **kwargs):
+ timeout_value = seconds
+ if timeout_value is None:
+ timeout_value = globals().get('GATHER_TIMEOUT') or DEFAULT_GATHER_TIMEOUT
+
+ pool = mp.ThreadPool(processes=1)
+ res = pool.apply_async(func, args, kwargs)
+ pool.close()
+ try:
+ return res.get(timeout_value)
+ except multiprocessing.TimeoutError:
+ # This is an ansible.module_utils.facts.timeout.TimeoutError
+ raise TimeoutError('Timer expired after %s seconds' % timeout_value)
+ finally:
+ pool.terminate()
+
+ return wrapper
+
+ # If we were called as @timeout, then the first parameter will be the
+ # function we are to wrap instead of the number of seconds. Detect this
+ # and correct it by setting seconds to our default value and return the
+ # inner decorator function manually wrapped around the function
+ if callable(seconds):
+ func = seconds
+ seconds = None
+ return decorator(func)
+
+ # If we were called as @timeout([...]) then python itself will take
+ # care of wrapping the inner decorator around the function
+
+ return decorator
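
A usage sketch covering both invocation forms that the callable(seconds) check above supports (imports come from this new module):

    from ansible.module_utils.facts.timeout import timeout, TimeoutError

    @timeout          # bare form: the callable(seconds) branch, default timeout
    def quick_fact():
        return 42

    @timeout(5)       # parameterized form
    def slow_fact():
        import time
        time.sleep(60)

    quick_fact()      # -> 42
    try:
        slow_fact()   # raises TimeoutError('Timer expired after 5 seconds')
    except TimeoutError:
        pass
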
diff --git a/lib/ansible/module_utils/facts/utils.py b/lib/ansible/module_utils/facts/utils.py
new file mode 100644
index 0000000..a6027ab
--- /dev/null
+++ b/lib/ansible/module_utils/facts/utils.py
@@ -0,0 +1,102 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import fcntl
+import os
+
+
+def get_file_content(path, default=None, strip=True):
+ '''
+ Return the contents of a given file path
+
+ :args path: path to file to return contents from
+ :args default: value to return if we could not read file
+ :args strip: controls if we strip whitespace from the result or not
+
+ :returns: String with file contents (optionally stripped) or 'default' value
+ '''
+ data = default
+ if os.path.exists(path) and os.access(path, os.R_OK):
+ datafile = None
+ try:
+ datafile = open(path)
+ try:
+ # try to not enter kernel 'block' mode, which prevents timeouts
+ fd = datafile.fileno()
+ flag = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
+ except Exception:
+ pass # not required to operate, but would have been nice!
+
+ # actually read the data
+ data = datafile.read()
+
+ if strip:
+ data = data.strip()
+
+ if len(data) == 0:
+ data = default
+
+ except Exception:
+ # ignore errors as some jails/containers might have readable permissions but not allow reads
+ pass
+ finally:
+ if datafile is not None:
+ datafile.close()
+
+ return data
+
+
+def get_file_lines(path, strip=True, line_sep=None):
+ '''get list of lines from file'''
+ data = get_file_content(path, strip=strip)
+ if data:
+ if line_sep is None:
+ ret = data.splitlines()
+ else:
+ if len(line_sep) == 1:
+ ret = data.rstrip(line_sep).split(line_sep)
+ else:
+ ret = data.split(line_sep)
+ else:
+ ret = []
+ return ret
+
+
+def get_mount_size(mountpoint):
+ mount_size = {}
+
+ try:
+ statvfs_result = os.statvfs(mountpoint)
+ mount_size['size_total'] = statvfs_result.f_frsize * statvfs_result.f_blocks
+ mount_size['size_available'] = statvfs_result.f_frsize * statvfs_result.f_bavail
+
+ # Block total/available/used
+ mount_size['block_size'] = statvfs_result.f_bsize
+ mount_size['block_total'] = statvfs_result.f_blocks
+ mount_size['block_available'] = statvfs_result.f_bavail
+ mount_size['block_used'] = mount_size['block_total'] - mount_size['block_available']
+
+ # Inode total/available/used
+ mount_size['inode_total'] = statvfs_result.f_files
+ mount_size['inode_available'] = statvfs_result.f_favail
+ mount_size['inode_used'] = mount_size['inode_total'] - mount_size['inode_available']
+ except OSError:
+ pass
+
+ return mount_size
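+
+# Illustrative usage only (the paths are examples, not part of this module):
+#
+#   cmdline = get_file_content('/proc/cmdline', default='')
+#   fstab_lines = get_file_lines('/etc/fstab')
+#   root_size = get_mount_size('/')  # returns {} if statvfs() fails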
diff --git a/lib/ansible/module_utils/facts/virtual/__init__.py b/lib/ansible/module_utils/facts/virtual/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/__init__.py
diff --git a/lib/ansible/module_utils/facts/virtual/base.py b/lib/ansible/module_utils/facts/virtual/base.py
new file mode 100644
index 0000000..67b59a5
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/base.py
@@ -0,0 +1,80 @@
+# base classes for virtualization facts
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class Virtual:
+ """
+ This is a generic Virtual class that should be subclassed
+ per platform. If you subclass this, you should define:
+ - virtualization_type
+ - virtualization_role
+ - container (e.g. solaris zones, freebsd jails, linux containers)
+
+ All subclasses MUST define platform.
+ """
+ platform = 'Generic'
+
+ # FIXME: remove load_on_init if we can
+ def __init__(self, module, load_on_init=False):
+ self.module = module
+
+ # FIXME: just here for existing test cases until they are updated
+ def populate(self, collected_facts=None):
+ virtual_facts = self.get_virtual_facts()
+
+ return virtual_facts
+
+ def get_virtual_facts(self):
+ virtual_facts = {
+ 'virtualization_type': '',
+ 'virtualization_role': '',
+ 'virtualization_tech_guest': set(),
+ 'virtualization_tech_host': set(),
+ }
+ return virtual_facts
+
+
+class VirtualCollector(BaseFactCollector):
+ name = 'virtual'
+ _fact_class = Virtual
+ _fact_ids = set([
+ 'virtualization_type',
+ 'virtualization_role',
+ 'virtualization_tech_guest',
+ 'virtualization_tech_host',
+ ]) # type: t.Set[str]
+
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+ if not module:
+ return {}
+
+ # Network munges cached_facts by side effect, so give it a copy
+ facts_obj = self._fact_class(module)
+
+ facts_dict = facts_obj.populate(collected_facts=collected_facts)
+
+ return facts_dict
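+
+# Illustrative sketch of a platform-specific pair (the 'Example' platform and
+# its detection result are hypothetical):
+#
+#   class ExampleVirtual(Virtual):
+#       platform = 'Example'
+#
+#       def get_virtual_facts(self):
+#           virtual_facts = super(ExampleVirtual, self).get_virtual_facts()
+#           virtual_facts['virtualization_type'] = 'kvm'
+#           virtual_facts['virtualization_role'] = 'guest'
+#           return virtual_facts
+#
+#   class ExampleVirtualCollector(VirtualCollector):
+#       _fact_class = ExampleVirtual
+#       _platform = 'Example'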
diff --git a/lib/ansible/module_utils/facts/virtual/dragonfly.py b/lib/ansible/module_utils/facts/virtual/dragonfly.py
new file mode 100644
index 0000000..b176f8b
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/dragonfly.py
@@ -0,0 +1,25 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.virtual.freebsd import FreeBSDVirtual, VirtualCollector
+
+
+class DragonFlyVirtualCollector(VirtualCollector):
+ # Note the _fact_class impl is actually the FreeBSDVirtual impl
+ _fact_class = FreeBSDVirtual
+ _platform = 'DragonFly'
diff --git a/lib/ansible/module_utils/facts/virtual/freebsd.py b/lib/ansible/module_utils/facts/virtual/freebsd.py
new file mode 100644
index 0000000..7062d01
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/freebsd.py
@@ -0,0 +1,79 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
+
+
+class FreeBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
+ """
+ This is a FreeBSD-specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ """
+ platform = 'FreeBSD'
+
+ def get_virtual_facts(self):
+ virtual_facts = {}
+ host_tech = set()
+ guest_tech = set()
+
+ # Set empty values as default
+ virtual_facts['virtualization_type'] = ''
+ virtual_facts['virtualization_role'] = ''
+
+ if os.path.exists('/dev/xen/xenstore'):
+ guest_tech.add('xen')
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'guest'
+
+ kern_vm_guest = self.detect_virt_product('kern.vm_guest')
+ guest_tech.update(kern_vm_guest['virtualization_tech_guest'])
+ host_tech.update(kern_vm_guest['virtualization_tech_host'])
+
+ hw_hv_vendor = self.detect_virt_product('hw.hv_vendor')
+ guest_tech.update(hw_hv_vendor['virtualization_tech_guest'])
+ host_tech.update(hw_hv_vendor['virtualization_tech_host'])
+
+ sec_jail_jailed = self.detect_virt_product('security.jail.jailed')
+ guest_tech.update(sec_jail_jailed['virtualization_tech_guest'])
+ host_tech.update(sec_jail_jailed['virtualization_tech_host'])
+
+ if virtual_facts['virtualization_type'] == '':
+ sysctl = kern_vm_guest or hw_hv_vendor or sec_jail_jailed
+ # We call update here, then re-set virtualization_tech_host/guest
+ # later.
+ virtual_facts.update(sysctl)
+
+ virtual_vendor_facts = self.detect_virt_vendor('hw.model')
+ guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
+
+ if virtual_facts['virtualization_type'] == '':
+ virtual_facts.update(virtual_vendor_facts)
+
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
+ return virtual_facts
+
+
+class FreeBSDVirtualCollector(VirtualCollector):
+ _fact_class = FreeBSDVirtual
+ _platform = 'FreeBSD'
diff --git a/lib/ansible/module_utils/facts/virtual/hpux.py b/lib/ansible/module_utils/facts/virtual/hpux.py
new file mode 100644
index 0000000..1057482
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/hpux.py
@@ -0,0 +1,72 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+
+
+class HPUXVirtual(Virtual):
+ """
+ This is an HP-UX-specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ """
+ platform = 'HP-UX'
+
+ def get_virtual_facts(self):
+ virtual_facts = {}
+ host_tech = set()
+ guest_tech = set()
+
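+ # NOTE: in the branches below 'virtualization_type' holds the guest/host
+ # role and 'virtualization_role' holds the technology name, which looks
+ # inverted; this is preserved as-is since existing consumers of the HP-UX
+ # facts may rely on it.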
+ if os.path.exists('/usr/sbin/vecheck'):
+ rc, out, err = self.module.run_command("/usr/sbin/vecheck")
+ if rc == 0:
+ guest_tech.add('HP vPar')
+ virtual_facts['virtualization_type'] = 'guest'
+ virtual_facts['virtualization_role'] = 'HP vPar'
+ if os.path.exists('/opt/hpvm/bin/hpvminfo'):
+ rc, out, err = self.module.run_command("/opt/hpvm/bin/hpvminfo")
+ if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
+ guest_tech.add('HPVM vPar')
+ virtual_facts['virtualization_type'] = 'guest'
+ virtual_facts['virtualization_role'] = 'HPVM vPar'
+ elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
+ guest_tech.add('HPVM IVM')
+ virtual_facts['virtualization_type'] = 'guest'
+ virtual_facts['virtualization_role'] = 'HPVM IVM'
+ elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
+ guest_tech.add('HPVM')
+ virtual_facts['virtualization_type'] = 'host'
+ virtual_facts['virtualization_role'] = 'HPVM'
+ if os.path.exists('/usr/sbin/parstatus'):
+ rc, out, err = self.module.run_command("/usr/sbin/parstatus")
+ if rc == 0:
+ guest_tech.add('HP nPar')
+ virtual_facts['virtualization_type'] = 'guest'
+ virtual_facts['virtualization_role'] = 'HP nPar'
+
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
+ return virtual_facts
+
+
+class HPUXVirtualCollector(VirtualCollector):
+ _fact_class = HPUXVirtual
+ _platform = 'HP-UX'
diff --git a/lib/ansible/module_utils/facts/virtual/linux.py b/lib/ansible/module_utils/facts/virtual/linux.py
new file mode 100644
index 0000000..31fa061
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/linux.py
@@ -0,0 +1,405 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import glob
+import os
+import re
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+from ansible.module_utils.facts.utils import get_file_content, get_file_lines
+
+
+class LinuxVirtual(Virtual):
+ """
+ This is a Linux-specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ """
+ platform = 'Linux'
+
+ # For more information, check: http://people.redhat.com/~rjones/virt-what/
+ def get_virtual_facts(self):
+ virtual_facts = {}
+
+ # We want to maintain compatibility with the old "virtualization_type"
+ # and "virtualization_role" entries, so we need to track if we found
+ # them. We won't return them until the end, but if we found them early,
+ # we should avoid updating them again.
+ found_virt = False
+
+ # But as we go along, we also want to track virt tech the new way.
+ host_tech = set()
+ guest_tech = set()
+
+ # lxc/docker
+ if os.path.exists('/proc/1/cgroup'):
+ for line in get_file_lines('/proc/1/cgroup'):
+ if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
+ guest_tech.add('docker')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'docker'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if re.search('/lxc/', line) or re.search('/machine.slice/machine-lxc', line):
+ guest_tech.add('lxc')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'lxc'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if re.search('/system.slice/containerd.service', line):
+ guest_tech.add('containerd')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'containerd'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ # lxc does not always appear in cgroups anymore, but sets the 'container=lxc' environment var; reading /proc/1/environ requires root privs
+ if os.path.exists('/proc/1/environ'):
+ for line in get_file_lines('/proc/1/environ', line_sep='\x00'):
+ if re.search('container=lxc', line):
+ guest_tech.add('lxc')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'lxc'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if re.search('container=podman', line):
+ guest_tech.add('podman')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'podman'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if re.search('^container=.', line):
+ guest_tech.add('container')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'container'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ if os.path.exists('/proc/vz') and not os.path.exists('/proc/lve'):
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'openvz'
+ if os.path.exists('/proc/bc'):
+ host_tech.add('openvz')
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'host'
+ else:
+ guest_tech.add('openvz')
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ systemd_container = get_file_content('/run/systemd/container')
+ if systemd_container:
+ guest_tech.add(systemd_container)
+ if not found_virt:
+ virtual_facts['virtualization_type'] = systemd_container
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ # If docker/containerd has a custom cgroup parent, checking /proc/1/cgroup (above) might fail.
+ # https://docs.docker.com/engine/reference/commandline/dockerd/#default-cgroup-parent
+ # Fallback to more rudimentary checks.
+ if os.path.exists('/.dockerenv') or os.path.exists('/.dockerinit'):
+ guest_tech.add('docker')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'docker'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ # ensure 'container' guest_tech is appropriately set
+ if guest_tech.intersection(set(['docker', 'lxc', 'podman', 'openvz', 'containerd'])) or systemd_container:
+ guest_tech.add('container')
+
+ if os.path.exists("/proc/xen"):
+ is_xen_host = False
+ try:
+ for line in get_file_lines('/proc/xen/capabilities'):
+ if "control_d" in line:
+ is_xen_host = True
+ except IOError:
+ pass
+
+ if is_xen_host:
+ host_tech.add('xen')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'host'
+ else:
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ # assume guest for this block
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'guest'
+
+ product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
+ sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
+ product_family = get_file_content('/sys/devices/virtual/dmi/id/product_family')
+
+ if product_name in ('KVM', 'KVM Server', 'Bochs', 'AHV'):
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ found_virt = True
+
+ if sys_vendor == 'oVirt':
+ guest_tech.add('oVirt')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'oVirt'
+ found_virt = True
+
+ if sys_vendor == 'Red Hat':
+ if product_family == 'RHV':
+ guest_tech.add('RHV')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'RHV'
+ found_virt = True
+ elif product_name == 'RHEV Hypervisor':
+ guest_tech.add('RHEV')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'RHEV'
+ found_virt = True
+
+ if product_name in ('VMware Virtual Platform', 'VMware7,1'):
+ guest_tech.add('VMware')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'VMware'
+ found_virt = True
+
+ if product_name in ('OpenStack Compute', 'OpenStack Nova'):
+ guest_tech.add('openstack')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'openstack'
+ found_virt = True
+
+ bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
+
+ if bios_vendor == 'Xen':
+ guest_tech.add('xen')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'xen'
+ found_virt = True
+
+ if bios_vendor == 'innotek GmbH':
+ guest_tech.add('virtualbox')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'virtualbox'
+ found_virt = True
+
+ if bios_vendor in ('Amazon EC2', 'DigitalOcean', 'Hetzner'):
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ found_virt = True
+
+ KVM_SYS_VENDORS = ('QEMU', 'Amazon EC2', 'DigitalOcean', 'Google', 'Scaleway', 'Nutanix')
+ if sys_vendor in KVM_SYS_VENDORS:
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ found_virt = True
+
+ if sys_vendor == 'KubeVirt':
+ guest_tech.add('KubeVirt')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'KubeVirt'
+ found_virt = True
+
+ # FIXME: This also matches Hyper-V
+ if sys_vendor == 'Microsoft Corporation':
+ guest_tech.add('VirtualPC')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'VirtualPC'
+ found_virt = True
+
+ if sys_vendor == 'Parallels Software International Inc.':
+ guest_tech.add('parallels')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'parallels'
+ found_virt = True
+
+ if sys_vendor == 'OpenStack Foundation':
+ guest_tech.add('openstack')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'openstack'
+ found_virt = True
+
+ # unassume guest
+ if not found_virt:
+ del virtual_facts['virtualization_role']
+
+ if os.path.exists('/proc/self/status'):
+ for line in get_file_lines('/proc/self/status'):
+ if re.match(r'^VxID:\s+\d+', line):
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'linux_vserver'
+ if re.match(r'^VxID:\s+0', line):
+ host_tech.add('linux_vserver')
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'host'
+ else:
+ guest_tech.add('linux_vserver')
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ if os.path.exists('/proc/cpuinfo'):
+ for line in get_file_lines('/proc/cpuinfo'):
+ if re.match('^model name.*QEMU Virtual CPU', line):
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ elif re.match('^vendor_id.*User Mode Linux', line):
+ guest_tech.add('uml')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'uml'
+ elif re.match('^model name.*UML', line):
+ guest_tech.add('uml')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'uml'
+ elif re.match('^machine.*CHRP IBM pSeries .emulated by qemu.', line):
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ elif re.match('^vendor_id.*PowerVM Lx86', line):
+ guest_tech.add('powervm_lx86')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'powervm_lx86'
+ elif re.match('^vendor_id.*IBM/S390', line):
+ guest_tech.add('PR/SM')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'PR/SM'
+ lscpu = self.module.get_bin_path('lscpu')
+ if lscpu:
+ rc, out, err = self.module.run_command(["lscpu"])
+ if rc == 0:
+ for line in out.splitlines():
+ data = line.split(":", 1)
+ key = data[0].strip()
+ if key == 'Hypervisor':
+ tech = data[1].strip()
+ guest_tech.add(tech)
+ if not found_virt:
+ virtual_facts['virtualization_type'] = tech
+ else:
+ guest_tech.add('ibm_systemz')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'ibm_systemz'
+ else:
+ continue
+ if virtual_facts['virtualization_type'] == 'PR/SM':
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'LPAR'
+ else:
+ if not found_virt:
+ virtual_facts['virtualization_role'] = 'guest'
+ if not found_virt:
+ found_virt = True
+
+ # Beware that we can have both kvm and virtualbox running on a single system
+ if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
+ modules = []
+ for line in get_file_lines("/proc/modules"):
+ data = line.split(" ", 1)
+ modules.append(data[0])
+
+ if 'kvm' in modules:
+ host_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ virtual_facts['virtualization_role'] = 'host'
+
+ if os.path.isdir('/rhev/'):
+ # Check whether this is a RHEV hypervisor (is vdsm running?)
+ for f in glob.glob('/proc/[0-9]*/comm'):
+ try:
+ with open(f) as virt_fh:
+ comm_content = virt_fh.read().rstrip()
+
+ if comm_content in ('vdsm', 'vdsmd'):
+ # We add both kvm and RHEV to host_tech in this case.
+ # It's accurate. RHEV uses KVM.
+ host_tech.add('RHEV')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'RHEV'
+ break
+ except Exception:
+ pass
+
+ found_virt = True
+
+ if 'vboxdrv' in modules:
+ host_tech.add('virtualbox')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'virtualbox'
+ virtual_facts['virtualization_role'] = 'host'
+ found_virt = True
+
+ if 'virtio' in modules:
+ host_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ # In older Linux kernel versions the /sys filesystem is not available;
+ # dmidecode is the safest option for parsing virtualization-related values
+ dmi_bin = self.module.get_bin_path('dmidecode')
+ # We still want to continue even if dmidecode is not available
+ if dmi_bin is not None:
+ (rc, out, err) = self.module.run_command('%s -s system-product-name' % dmi_bin)
+ if rc == 0:
+ # Strip out commented lines (specific dmidecode output)
+ vendor_name = ''.join([line.strip() for line in out.splitlines() if not line.startswith('#')])
+ if vendor_name.startswith('VMware'):
+ guest_tech.add('VMware')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'VMware'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ if 'BHYVE' in out:
+ guest_tech.add('bhyve')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'bhyve'
+ virtual_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ if os.path.exists('/dev/kvm'):
+ host_tech.add('kvm')
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'kvm'
+ virtual_facts['virtualization_role'] = 'host'
+ found_virt = True
+
+ # If none of the above matches, return 'NA' for virtualization_type
+ # and virtualization_role. This allows for proper grouping.
+ if not found_virt:
+ virtual_facts['virtualization_type'] = 'NA'
+ virtual_facts['virtualization_role'] = 'NA'
+ found_virt = True
+
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
+ return virtual_facts
+
+
+class LinuxVirtualCollector(VirtualCollector):
+ _fact_class = LinuxVirtual
+ _platform = 'Linux'
diff --git a/lib/ansible/module_utils/facts/virtual/netbsd.py b/lib/ansible/module_utils/facts/virtual/netbsd.py
new file mode 100644
index 0000000..b4ef14e
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/netbsd.py
@@ -0,0 +1,73 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
+
+
+class NetBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
+ platform = 'NetBSD'
+
+ def get_virtual_facts(self):
+ virtual_facts = {}
+ host_tech = set()
+ guest_tech = set()
+
+ # Set empty values as default
+ virtual_facts['virtualization_type'] = ''
+ virtual_facts['virtualization_role'] = ''
+
+ virtual_product_facts = self.detect_virt_product('machdep.dmi.system-product')
+ guest_tech.update(virtual_product_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_product_facts['virtualization_tech_host'])
+ virtual_facts.update(virtual_product_facts)
+
+ virtual_vendor_facts = self.detect_virt_vendor('machdep.dmi.system-vendor')
+ guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
+
+ if virtual_facts['virtualization_type'] == '':
+ virtual_facts.update(virtual_vendor_facts)
+
+ # The above logic is tried first for backwards compatibility. If
+ # something above matches, use it. Otherwise if the result is still
+ # empty, try machdep.hypervisor.
+ virtual_vendor_facts = self.detect_virt_vendor('machdep.hypervisor')
+ guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
+
+ if virtual_facts['virtualization_type'] == '':
+ virtual_facts.update(virtual_vendor_facts)
+
+ if os.path.exists('/dev/xencons'):
+ guest_tech.add('xen')
+
+ if virtual_facts['virtualization_type'] == '':
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'guest'
+
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
+ return virtual_facts
+
+
+class NetBSDVirtualCollector(VirtualCollector):
+ _fact_class = NetBSDVirtual
+ _platform = 'NetBSD'
diff --git a/lib/ansible/module_utils/facts/virtual/openbsd.py b/lib/ansible/module_utils/facts/virtual/openbsd.py
new file mode 100644
index 0000000..c449028
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/openbsd.py
@@ -0,0 +1,74 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
+
+from ansible.module_utils.facts.utils import get_file_content
+
+
+class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
+ """
+ This is an OpenBSD-specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ """
+ platform = 'OpenBSD'
+ DMESG_BOOT = '/var/run/dmesg.boot'
+
+ def get_virtual_facts(self):
+ virtual_facts = {}
+ host_tech = set()
+ guest_tech = set()
+
+ # Set empty values as default
+ virtual_facts['virtualization_type'] = ''
+ virtual_facts['virtualization_role'] = ''
+
+ virtual_product_facts = self.detect_virt_product('hw.product')
+ guest_tech.update(virtual_product_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_product_facts['virtualization_tech_host'])
+ virtual_facts.update(virtual_product_facts)
+
+ virtual_vendor_facts = self.detect_virt_vendor('hw.vendor')
+ guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
+ host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
+
+ if virtual_facts['virtualization_type'] == '':
+ virtual_facts.update(virtual_vendor_facts)
+
+ # Check dmesg to see if vmm(4) attached, indicating the host is
+ # capable of virtualization. Default to '' so a missing dmesg.boot
+ # does not break the splitlines() call below.
+ dmesg_boot = get_file_content(OpenBSDVirtual.DMESG_BOOT, default='')
+ for line in dmesg_boot.splitlines():
+ match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line)
+ if match:
+ host_tech.add('vmm')
+ virtual_facts['virtualization_type'] = 'vmm'
+ virtual_facts['virtualization_role'] = 'host'
+
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
+ return virtual_facts
+
+
+class OpenBSDVirtualCollector(VirtualCollector):
+ _fact_class = OpenBSDVirtual
+ _platform = 'OpenBSD'
diff --git a/lib/ansible/module_utils/facts/virtual/sunos.py b/lib/ansible/module_utils/facts/virtual/sunos.py
new file mode 100644
index 0000000..1e92677
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/sunos.py
@@ -0,0 +1,139 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+
+
+class SunOSVirtual(Virtual):
+ """
+ This is a SunOS-specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ - container
+ """
+ platform = 'SunOS'
+
+ def get_virtual_facts(self):
+ virtual_facts = {}
+ host_tech = set()
+ guest_tech = set()
+
+ # Check if it's a zone
+ zonename = self.module.get_bin_path('zonename')
+ if zonename:
+ rc, out, err = self.module.run_command(zonename)
+ if rc == 0:
+ if out.rstrip() == "global":
+ host_tech.add('zone')
+ else:
+ guest_tech.add('zone')
+ virtual_facts['container'] = 'zone'
+
+ # Check if it's a branded zone (i.e. Solaris 8/9 zone)
+ if os.path.isdir('/.SUNWnative'):
+ guest_tech.add('zone')
+ virtual_facts['container'] = 'zone'
+
+ # If it's a zone check if we can detect if our global zone is itself virtualized.
+ # Relies on the "guest tools" (e.g. vmware tools) to be installed
+ if 'container' in virtual_facts and virtual_facts['container'] == 'zone':
+ modinfo = self.module.get_bin_path('modinfo')
+ if modinfo:
+ rc, out, err = self.module.run_command(modinfo)
+ if rc == 0:
+ for line in out.splitlines():
+ if 'VMware' in line:
+ guest_tech.add('vmware')
+ virtual_facts['virtualization_type'] = 'vmware'
+ virtual_facts['virtualization_role'] = 'guest'
+ if 'VirtualBox' in line:
+ guest_tech.add('virtualbox')
+ virtual_facts['virtualization_type'] = 'virtualbox'
+ virtual_facts['virtualization_role'] = 'guest'
+
+ if os.path.exists('/proc/vz'):
+ guest_tech.add('virtuozzo')
+ virtual_facts['virtualization_type'] = 'virtuozzo'
+ virtual_facts['virtualization_role'] = 'guest'
+
+ # Detect domaining on Sparc hardware
+ virtinfo = self.module.get_bin_path('virtinfo')
+ if virtinfo:
+ # The output of virtinfo differs depending on whether we are on a machine with
+ # logical domains ('LDoms') on a T-series or domains ('Domains') on an M-series. Try LDoms first.
+ rc, out, err = self.module.run_command("/usr/sbin/virtinfo -p")
+ # The output contains multiple lines with different keys like this:
+ # DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
+ # The output may also be unformatted, and the return code is set to 0 regardless of the error condition:
+ # virtinfo can only be run from the global zone
+ if rc == 0:
+ try:
+ for line in out.splitlines():
+ fields = line.split('|')
+ if fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms':
+ guest_tech.add('ldom')
+ virtual_facts['virtualization_type'] = 'ldom'
+ virtual_facts['virtualization_role'] = 'guest'
+ hostfeatures = []
+ for field in fields[2:]:
+ arg = field.split('=')
+ if arg[1] == 'true':
+ hostfeatures.append(arg[0])
+ if len(hostfeatures) > 0:
+ virtual_facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
+ except ValueError:
+ pass
+
+ else:
+ smbios = self.module.get_bin_path('smbios')
+ # Do not return early when smbios is missing; falling through keeps the
+ # virtualization_tech_* facts below populated.
+ rc = -1
+ if smbios:
+ rc, out, err = self.module.run_command(smbios)
+ if rc == 0:
+ for line in out.splitlines():
+ if 'VMware' in line:
+ guest_tech.add('vmware')
+ virtual_facts['virtualization_type'] = 'vmware'
+ virtual_facts['virtualization_role'] = 'guest'
+ elif 'Parallels' in line:
+ guest_tech.add('parallels')
+ virtual_facts['virtualization_type'] = 'parallels'
+ virtual_facts['virtualization_role'] = 'guest'
+ elif 'VirtualBox' in line:
+ guest_tech.add('virtualbox')
+ virtual_facts['virtualization_type'] = 'virtualbox'
+ virtual_facts['virtualization_role'] = 'guest'
+ elif 'HVM domU' in line:
+ guest_tech.add('xen')
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'guest'
+ elif 'KVM' in line:
+ guest_tech.add('kvm')
+ virtual_facts['virtualization_type'] = 'kvm'
+ virtual_facts['virtualization_role'] = 'guest'
+
+ virtual_facts['virtualization_tech_guest'] = guest_tech
+ virtual_facts['virtualization_tech_host'] = host_tech
+ return virtual_facts
+
+
+class SunOSVirtualCollector(VirtualCollector):
+ _fact_class = SunOSVirtual
+ _platform = 'SunOS'
diff --git a/lib/ansible/module_utils/facts/virtual/sysctl.py b/lib/ansible/module_utils/facts/virtual/sysctl.py
new file mode 100644
index 0000000..1c7b2b3
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/sysctl.py
@@ -0,0 +1,112 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+
+class VirtualSysctlDetectionMixin(object):
+ def detect_sysctl(self):
+ self.sysctl_path = self.module.get_bin_path('sysctl')
+
+ def detect_virt_product(self, key):
+ virtual_product_facts = {}
+ host_tech = set()
+ guest_tech = set()
+
+ # Similar to linux.py: we want to allow multiple virt techs to show up,
+ # but to maintain backwards compatibility we track where the old code
+ # would have stopped, even though we now check everything.
+ found_virt = False
+
+ self.detect_sysctl()
+ if self.sysctl_path:
+ rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
+ if rc == 0:
+ if re.match('(KVM|kvm|Bochs|SmartDC).*', out):
+ guest_tech.add('kvm')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'kvm'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if re.match('.*VMware.*', out):
+ guest_tech.add('VMware')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'VMware'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if out.rstrip() == 'VirtualBox':
+ guest_tech.add('virtualbox')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'virtualbox'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if re.match('(HVM domU|XenPVH|XenPV|XenPVHVM).*', out):
+ guest_tech.add('xen')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'xen'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if out.rstrip() == 'Hyper-V':
+ guest_tech.add('Hyper-V')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'Hyper-V'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if out.rstrip() == 'Parallels':
+ guest_tech.add('parallels')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'parallels'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if out.rstrip() == 'RHEV Hypervisor':
+ guest_tech.add('RHEV')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'RHEV'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+ if (key == 'security.jail.jailed') and (out.rstrip() == '1'):
+ guest_tech.add('jails')
+ if not found_virt:
+ virtual_product_facts['virtualization_type'] = 'jails'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ found_virt = True
+
+ virtual_product_facts['virtualization_tech_guest'] = guest_tech
+ virtual_product_facts['virtualization_tech_host'] = host_tech
+ return virtual_product_facts
+
+ def detect_virt_vendor(self, key):
+ virtual_vendor_facts = {}
+ host_tech = set()
+ guest_tech = set()
+ self.detect_sysctl()
+ if self.sysctl_path:
+ rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
+ if rc == 0:
+ if out.rstrip() == 'QEMU':
+ guest_tech.add('kvm')
+ virtual_vendor_facts['virtualization_type'] = 'kvm'
+ virtual_vendor_facts['virtualization_role'] = 'guest'
+ if out.rstrip() == 'OpenBSD':
+ guest_tech.add('vmm')
+ virtual_vendor_facts['virtualization_type'] = 'vmm'
+ virtual_vendor_facts['virtualization_role'] = 'guest'
+
+ virtual_vendor_facts['virtualization_tech_guest'] = guest_tech
+ virtual_vendor_facts['virtualization_tech_host'] = host_tech
+ return virtual_vendor_facts
diff --git a/lib/ansible/module_utils/json_utils.py b/lib/ansible/module_utils/json_utils.py
new file mode 100644
index 0000000..0e95aa6
--- /dev/null
+++ b/lib/ansible/module_utils/json_utils.py
@@ -0,0 +1,79 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+# NB: a copy of this function exists in ../../modules/core/async_wrapper.py. Ensure any
+# changes are propagated there.
+def _filter_non_json_lines(data, objects_only=False):
+ '''
+ Used to filter unrelated output around module JSON output, like messages from
+ tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
+
+ Filters leading lines before the first line-starting occurrence of '{' or '[', and filters all
+ trailing lines after the matching close character (working from the bottom of the output).
+ '''
+ warnings = []
+
+ # Filter initial junk
+ lines = data.splitlines()
+
+ for start, line in enumerate(lines):
+ line = line.strip()
+ if line.startswith(u'{'):
+ endchar = u'}'
+ break
+ elif not objects_only and line.startswith(u'['):
+ endchar = u']'
+ break
+ else:
+ raise ValueError('No start of json char found')
+
+ # Filter trailing junk
+ lines = lines[start:]
+
+ for reverse_end_offset, line in enumerate(reversed(lines)):
+ if line.strip().endswith(endchar):
+ break
+ else:
+ raise ValueError('No end of json char found')
+
+ if reverse_end_offset > 0:
+ # Trailing junk is uncommon and can point to things the user might
+ # want to change. So print a warning if we find any
+ trailing_junk = lines[len(lines) - reverse_end_offset:]
+ for line in trailing_junk:
+ if line.strip():
+ warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
+ break
+
+ lines = lines[:(len(lines) - reverse_end_offset)]
+
+ return ('\n'.join(lines), warnings)
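+
+# Illustrative usage only (the sample output string is hypothetical):
+#
+#   raw = 'spurious MOTD banner\n{"changed": false}\ntrailing noise'
+#   filtered, warnings = _filter_non_json_lines(raw)
+#   result = json.loads(filtered)  # warnings notes the trailing junk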
diff --git a/lib/ansible/module_utils/parsing/__init__.py b/lib/ansible/module_utils/parsing/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/parsing/__init__.py
diff --git a/lib/ansible/module_utils/parsing/convert_bool.py b/lib/ansible/module_utils/parsing/convert_bool.py
new file mode 100644
index 0000000..7eea875
--- /dev/null
+++ b/lib/ansible/module_utils/parsing/convert_bool.py
@@ -0,0 +1,29 @@
+# Copyright: 2017, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause )
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils._text import to_text
+
+
+BOOLEANS_TRUE = frozenset(('y', 'yes', 'on', '1', 'true', 't', 1, 1.0, True))
+BOOLEANS_FALSE = frozenset(('n', 'no', 'off', '0', 'false', 'f', 0, 0.0, False))
+BOOLEANS = BOOLEANS_TRUE.union(BOOLEANS_FALSE)
+
+
+def boolean(value, strict=True):
+ if isinstance(value, bool):
+ return value
+
+ normalized_value = value
+ if isinstance(value, (text_type, binary_type)):
+ normalized_value = to_text(value, errors='surrogate_or_strict').lower().strip()
+
+ if normalized_value in BOOLEANS_TRUE:
+ return True
+ elif normalized_value in BOOLEANS_FALSE or not strict:
+ return False
+
+ raise TypeError("The value '%s' is not a valid boolean. Valid booleans include: %s" % (to_text(value), ', '.join(repr(i) for i in BOOLEANS)))
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
new file mode 100644
index 0000000..6dc2917
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
@@ -0,0 +1,398 @@
+# Copyright (c) 2018 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+Function Add-CSharpType {
+ <#
+ .SYNOPSIS
+ Compiles one or more C# scripts, similar to Add-Type. This exposes
+ more configuration options that are usable within Ansible and it
+ also allows multiple C# sources to be compiled together.
+
+ .PARAMETER References
+ [String[]] A collection of C# scripts to compile together.
+
+ .PARAMETER IgnoreWarnings
+ [Switch] Whether to compile code that contains compiler warnings; by
+ default, warnings will cause a compiler error.
+
+ .PARAMETER PassThru
+ [Switch] Whether to return the loaded Assembly
+
+ .PARAMETER AnsibleModule
+ [Ansible.Basic.AnsibleModule] used to derive the TempPath and Debug values.
+ TempPath is set to the Tmpdir property of the class.
+ IncludeDebugInfo is set when the Ansible verbosity is >= 3.
+
+ .PARAMETER TempPath
+ [String] The temporary directory in which the dynamic assembly is
+ compiled to. This file is deleted once compilation is complete.
+ Cannot be used when AnsibleModule is set. This is a no-op when
+ running on PSCore.
+
+ .PARAMETER IncludeDebugInfo
+ [Switch] Whether to include debug information in the compiled
+ assembly. Cannot be used when AnsibleModule is set. This is a no-op
+ when running on PSCore.
+
+ .PARAMETER CompileSymbols
+ [String[]] A list of symbols to be defined during compile time. These are
+ added to the existing symbols ('CORECLR', 'WINDOWS', 'UNIX') that are set
+ conditionally in this cmdlet.
+
+ .NOTES
+ The following features were added to control the compiling options from the
+ code itself.
+
+ * Predefined compiler SYMBOLS
+
+ * CORECLR - Added when running on PowerShell Core.
+ * WINDOWS - Added when running on Windows.
+ * UNIX - Added when running on non-Windows.
+ * X86 - Added when running on a 32-bit process (Ansible 2.10+)
+ * AMD64 - Added when running on a 64-bit process (Ansible 2.10+)
+
+ * Ignore compiler warnings with the following inline comment
+
+ //NoWarn -Name <rule code> [-CLR Core|Framework]
+
+ * Specify custom assembly references inline
+
+ //AssemblyReference -Name Dll.Location.dll [-CLR Core|Framework]
+
+ # Added in Ansible 2.10
+ //AssemblyReference -Type System.Type.Name [-CLR Core|Framework]
+
+ * Create automatic type accelerators to simplify long namespace names (Ansible 2.9+)
+
+ //TypeAccelerator -Name <AcceleratorName> -TypeName <Name of compiled type>
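+
+ .EXAMPLE
+ # Minimal illustrative call; the C# type name is hypothetical.
+ $source = 'namespace MyNs { public class Calc { public static int Add(int a, int b) { return a + b; } } }'
+ Add-CSharpType -References @($source)
+ [MyNs.Calc]::Add(1, 2)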
+ #>
+ param(
+ [Parameter(Mandatory = $true)][AllowEmptyCollection()][String[]]$References,
+ [Switch]$IgnoreWarnings,
+ [Switch]$PassThru,
+ [Parameter(Mandatory = $true, ParameterSetName = "Module")][Object]$AnsibleModule,
+ [Parameter(ParameterSetName = "Manual")][String]$TempPath = $env:TMP,
+ [Parameter(ParameterSetName = "Manual")][Switch]$IncludeDebugInfo,
+ [String[]]$CompileSymbols = @()
+ )
+ if ($null -eq $References -or $References.Length -eq 0) {
+ return
+ }
+
+ # define special symbols CORECLR, WINDOWS, UNIX if required
+ # the Is* variables are defined on PSCore, if absent we assume an
+ # older version of PowerShell under .NET Framework and Windows
+ $defined_symbols = [System.Collections.ArrayList]$CompileSymbols
+
+ if ([System.IntPtr]::Size -eq 4) {
+ $defined_symbols.Add('X86') > $null
+ }
+ else {
+ $defined_symbols.Add('AMD64') > $null
+ }
+
+ $is_coreclr = Get-Variable -Name IsCoreCLR -ErrorAction SilentlyContinue
+ if ($null -ne $is_coreclr) {
+ if ($is_coreclr.Value) {
+ $defined_symbols.Add("CORECLR") > $null
+ }
+ }
+ $is_windows = Get-Variable -Name IsWindows -ErrorAction SilentlyContinue
+ if ($null -ne $is_windows) {
+ if ($is_windows.Value) {
+ $defined_symbols.Add("WINDOWS") > $null
+ }
+ else {
+ $defined_symbols.Add("UNIX") > $null
+ }
+ }
+ else {
+ $defined_symbols.Add("WINDOWS") > $null
+ }
+
+ # Store any TypeAccelerators shortcuts the util wants us to set
+ $type_accelerators = [System.Collections.Generic.List`1[Hashtable]]@()
+
+ # pattern used to find referenced assemblies in the code
+ $assembly_pattern = [Regex]"//\s*AssemblyReference\s+-(?<Parameter>(Name)|(Type))\s+(?<Name>[\w.]*)(\s+-CLR\s+(?<CLR>Core|Framework))?"
+ $no_warn_pattern = [Regex]"//\s*NoWarn\s+-Name\s+(?<Name>[\w\d]*)(\s+-CLR\s+(?<CLR>Core|Framework))?"
+ $type_pattern = [Regex]"//\s*TypeAccelerator\s+-Name\s+(?<Name>[\w.]*)\s+-TypeName\s+(?<TypeName>[\w.]*)"
+
+ # PSCore vs PSDesktop use different methods to compile the code,
+ # PSCore uses Roslyn and can compile the code purely in memory
+ # without touching the disk while PSDesktop uses CodeDom and csc.exe
+ # to compile the code. We branch out here and run each
+ # distribution's method to add our C# code.
+ if ($is_coreclr) {
+ # compile the code using Roslyn on PSCore
+
+ # Include the default assemblies using the logic in Add-Type
+ # https://github.com/PowerShell/PowerShell/blob/master/src/Microsoft.PowerShell.Commands.Utility/commands/utility/AddType.cs
+ $assemblies = [System.Collections.Generic.HashSet`1[Microsoft.CodeAnalysis.MetadataReference]]@(
+ [Microsoft.CodeAnalysis.CompilationReference]::CreateFromFile(([System.Reflection.Assembly]::GetAssembly([PSObject])).Location)
+ )
+ $netcore_app_ref_folder = [System.IO.Path]::Combine([System.IO.Path]::GetDirectoryName([PSObject].Assembly.Location), "ref")
+ $lib_assembly_location = [System.IO.Path]::GetDirectoryName([object].Assembly.Location)
+ foreach ($file in [System.IO.Directory]::EnumerateFiles($netcore_app_ref_folder, "*.dll", [System.IO.SearchOption]::TopDirectoryOnly)) {
+ $assemblies.Add([Microsoft.CodeAnalysis.MetadataReference]::CreateFromFile($file)) > $null
+ }
+
+ # loop through the references, parse as a SyntaxTree and get
+ # referenced assemblies
+ $ignore_warnings = New-Object -TypeName 'System.Collections.Generic.Dictionary`2[[String], [Microsoft.CodeAnalysis.ReportDiagnostic]]'
+ $parse_options = ([Microsoft.CodeAnalysis.CSharp.CSharpParseOptions]::Default).WithPreprocessorSymbols($defined_symbols)
+ $syntax_trees = [System.Collections.Generic.List`1[Microsoft.CodeAnalysis.SyntaxTree]]@()
+ foreach ($reference in $References) {
+ # scan through code and add any assemblies that match
+ # //AssemblyReference -Name ... [-CLR Core]
+ # //NoWarn -Name ... [-CLR Core]
+ # //TypeAccelerator -Name ... -TypeName ...
+ $assembly_matches = $assembly_pattern.Matches($reference)
+ foreach ($match in $assembly_matches) {
+ $clr = $match.Groups["CLR"].Value
+ if ($clr -and $clr -ne "Core") {
+ continue
+ }
+
+ $parameter_type = $match.Groups["Parameter"].Value
+ $assembly_path = $match.Groups["Name"].Value
+ if ($parameter_type -eq "Type") {
+ $assembly_path = ([Type]$assembly_path).Assembly.Location
+ }
+ else {
+ if (-not ([System.IO.Path]::IsPathRooted($assembly_path))) {
+ $assembly_path = Join-Path -Path $lib_assembly_location -ChildPath $assembly_path
+ }
+ }
+ $assemblies.Add([Microsoft.CodeAnalysis.MetadataReference]::CreateFromFile($assembly_path)) > $null
+ }
+ $warn_matches = $no_warn_pattern.Matches($reference)
+ foreach ($match in $warn_matches) {
+ $clr = $match.Groups["CLR"].Value
+ if ($clr -and $clr -ne "Core") {
+ continue
+ }
+ $ignore_warnings.Add($match.Groups["Name"], [Microsoft.CodeAnalysis.ReportDiagnostic]::Suppress)
+ }
+ $syntax_trees.Add([Microsoft.CodeAnalysis.CSharp.CSharpSyntaxTree]::ParseText($reference, $parse_options)) > $null
+
+ $type_matches = $type_pattern.Matches($reference)
+ foreach ($match in $type_matches) {
+ $type_accelerators.Add(@{Name = $match.Groups["Name"].Value; TypeName = $match.Groups["TypeName"].Value })
+ }
+ }
+
+ # Release seems to contain the correct line numbers compared to
+ # debug; may need to keep a closer eye on this in the future
+ $compiler_options = (New-Object -TypeName Microsoft.CodeAnalysis.CSharp.CSharpCompilationOptions -ArgumentList @(
+ [Microsoft.CodeAnalysis.OutputKind]::DynamicallyLinkedLibrary
+ )).WithOptimizationLevel([Microsoft.CodeAnalysis.OptimizationLevel]::Release)
+
+ # set warnings to error out if IgnoreWarnings is not set
+ if (-not $IgnoreWarnings.IsPresent) {
+ $compiler_options = $compiler_options.WithGeneralDiagnosticOption([Microsoft.CodeAnalysis.ReportDiagnostic]::Error)
+ $compiler_options = $compiler_options.WithSpecificDiagnosticOptions($ignore_warnings)
+ }
+
+ # create compilation object
+ $compilation = [Microsoft.CodeAnalysis.CSharp.CSharpCompilation]::Create(
+ [System.Guid]::NewGuid().ToString(),
+ $syntax_trees,
+ $assemblies,
+ $compiler_options
+ )
+
+ # Load the compiled code and pdb info; we do this so we can
+ # include line numbers in a stack trace
+ $code_ms = New-Object -TypeName System.IO.MemoryStream
+ $pdb_ms = New-Object -TypeName System.IO.MemoryStream
+ try {
+ $emit_result = $compilation.Emit($code_ms, $pdb_ms)
+ if (-not $emit_result.Success) {
+ $errors = [System.Collections.ArrayList]@()
+
+ foreach ($e in $emit_result.Diagnostics) {
+ # builds the error msg, based on logic in Add-Type
+ # https://github.com/PowerShell/PowerShell/blob/master/src/Microsoft.PowerShell.Commands.Utility/commands/utility/AddType.cs#L1239
+ if ($null -eq $e.Location.SourceTree) {
+ $errors.Add($e.ToString()) > $null
+ continue
+ }
+
+ $cancel_token = New-Object -TypeName System.Threading.CancellationToken -ArgumentList $false
+ $text_lines = $e.Location.SourceTree.GetText($cancel_token).Lines
+ $line_span = $e.Location.GetLineSpan()
+
+ $diagnostic_message = $e.ToString()
+ $error_line_string = $text_lines[$line_span.StartLinePosition.Line].ToString()
+ $error_position = $line_span.StartLinePosition.Character
+
+ $sb = New-Object -TypeName System.Text.StringBuilder -ArgumentList ($diagnostic_message.Length + $error_line_string.Length * 2 + 4)
+ $sb.AppendLine($diagnostic_message) > $null
+ $sb.AppendLine($error_line_string) > $null
+
+ for ($i = 0; $i -lt $error_line_string.Length; $i++) {
+ if ([System.Char]::IsWhiteSpace($error_line_string[$i])) {
+ continue
+ }
+ $sb.Append($error_line_string, 0, $i) > $null
+ $sb.Append(' ', [Math]::Max(0, $error_position - $i)) > $null
+ $sb.Append("^") > $null
+ break
+ }
+
+ $errors.Add($sb.ToString()) > $null
+ }
+
+ throw [InvalidOperationException]"Failed to compile C# code:`r`n$($errors -join "`r`n")"
+ }
+
+ $code_ms.Seek(0, [System.IO.SeekOrigin]::Begin) > $null
+ $pdb_ms.Seek(0, [System.IO.SeekOrigin]::Begin) > $null
+ $compiled_assembly = [System.Runtime.Loader.AssemblyLoadContext]::Default.LoadFromStream($code_ms, $pdb_ms)
+ }
+ finally {
+ $code_ms.Close()
+ $pdb_ms.Close()
+ }
+ }
+ else {
+ # compile the code using CodeDom on PSDesktop
+
+ # configure compile options based on input
+ if ($PSCmdlet.ParameterSetName -eq "Module") {
+ $temp_path = $AnsibleModule.Tmpdir
+ $include_debug = $AnsibleModule.Verbosity -ge 3
+ }
+ else {
+ $temp_path = $TempPath
+ $include_debug = $IncludeDebugInfo.IsPresent
+ }
+ $compiler_options = [System.Collections.ArrayList]@("/optimize")
+ if ($defined_symbols.Count -gt 0) {
+ $compiler_options.Add("/define:" + ([String]::Join(";", $defined_symbols.ToArray()))) > $null
+ }
+
+ $compile_parameters = New-Object -TypeName System.CodeDom.Compiler.CompilerParameters
+ $compile_parameters.GenerateExecutable = $false
+ $compile_parameters.GenerateInMemory = $true
+ $compile_parameters.TreatWarningsAsErrors = (-not $IgnoreWarnings.IsPresent)
+ $compile_parameters.IncludeDebugInformation = $include_debug
+ $compile_parameters.TempFiles = (New-Object -TypeName System.CodeDom.Compiler.TempFileCollection -ArgumentList $temp_path, $false)
+
+ # Add-Type automatically references System.dll, System.Core.dll,
+ # and System.Management.Automation.dll which we replicate here
+ $assemblies = [System.Collections.Generic.HashSet`1[String]]@(
+ "System.dll",
+ "System.Core.dll",
+ ([System.Reflection.Assembly]::GetAssembly([PSObject])).Location
+ )
+
+ # create a code snippet for each reference and check if we need
+ # to reference any extra assemblies
+ $ignore_warnings = [System.Collections.ArrayList]@()
+ $compile_units = [System.Collections.Generic.List`1[System.CodeDom.CodeSnippetCompileUnit]]@()
+ foreach ($reference in $References) {
+ # scan through code and add any assemblies that match
+ # //AssemblyReference -Name ... [-CLR Framework]
+ # //NoWarn -Name ... [-CLR Framework]
+ # //TypeAccelerator -Name ... -TypeName ...
+ $assembly_matches = $assembly_pattern.Matches($reference)
+ foreach ($match in $assembly_matches) {
+ $clr = $match.Groups["CLR"].Value
+ if ($clr -and $clr -ne "Framework") {
+ continue
+ }
+
+ $parameter_type = $match.Groups["Parameter"].Value
+ $assembly_path = $match.Groups["Name"].Value
+ if ($parameter_type -eq "Type") {
+ $assembly_path = ([Type]$assembly_path).Assembly.Location
+ }
+ $assemblies.Add($assembly_path) > $null
+ }
+ $warn_matches = $no_warn_pattern.Matches($reference)
+ foreach ($match in $warn_matches) {
+ $clr = $match.Groups["CLR"].Value
+ if ($clr -and $clr -ne "Framework") {
+ continue
+ }
+ $warning_id = $match.Groups["Name"].Value
+ # /nowarn should only contain the numeric part
+ if ($warning_id.StartsWith("CS")) {
+ $warning_id = $warning_id.Substring(2)
+ }
+ $ignore_warnings.Add($warning_id) > $null
+ }
+ $compile_units.Add((New-Object -TypeName System.CodeDom.CodeSnippetCompileUnit -ArgumentList $reference)) > $null
+
+ $type_matches = $type_pattern.Matches($reference)
+ foreach ($match in $type_matches) {
+ $type_accelerators.Add(@{Name = $match.Groups["Name"].Value; TypeName = $match.Groups["TypeName"].Value })
+ }
+ }
+ if ($ignore_warnings.Count -gt 0) {
+ $compiler_options.Add("/nowarn:" + ([String]::Join(",", $ignore_warnings.ToArray()))) > $null
+ }
+ $compile_parameters.ReferencedAssemblies.AddRange($assemblies)
+ $compile_parameters.CompilerOptions = [String]::Join(" ", $compiler_options.ToArray())
+
+ # compile the code together and check for errors
+ $provider = New-Object -TypeName Microsoft.CSharp.CSharpCodeProvider
+
+ # This calls csc.exe which can take compiler options from environment variables. Currently these env vars
+ # are known to have problems so they are unset:
+ # LIB - additional library paths will fail the compilation if they are invalid
+ $originalEnv = @{}
+ try {
+ 'LIB' | ForEach-Object -Process {
+ $value = Get-Item -LiteralPath "Env:\$_" -ErrorAction SilentlyContinue
+ if ($value) {
+ $originalEnv[$_] = $value
+ Remove-Item -LiteralPath "Env:\$_"
+ }
+ }
+
+ $compile = $provider.CompileAssemblyFromDom($compile_parameters, $compile_units)
+ }
+ finally {
+ foreach ($kvp in $originalEnv.GetEnumerator()) {
+ [System.Environment]::SetEnvironmentVariable($kvp.Key, $kvp.Value, "Process")
+ }
+ }
+
+ if ($compile.Errors.HasErrors) {
+ $msg = "Failed to compile C# code: "
+ foreach ($e in $compile.Errors) {
+ $msg += "`r`n" + $e.ToString()
+ }
+ throw [InvalidOperationException]$msg
+ }
+ $compiled_assembly = $compile.CompiledAssembly
+ }
+
+ $type_accelerator = [PSObject].Assembly.GetType("System.Management.Automation.TypeAccelerators")
+ foreach ($accelerator in $type_accelerators) {
+ $type_name = $accelerator.TypeName
+ $found = $false
+
+ foreach ($assembly_type in $compiled_assembly.GetTypes()) {
+ if ($assembly_type.Name -eq $type_name) {
+ $type_accelerator::Add($accelerator.Name, $assembly_type)
+ $found = $true
+ break
+ }
+ }
+ if (-not $found) {
+ throw "Failed to find compiled class '$type_name' for custom TypeAccelerator."
+ }
+ }
+
+ # return the compiled assembly if PassThru is set.
+ if ($PassThru) {
+ return $compiled_assembly
+ }
+}
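+
+<#
+Illustrative usage sketch, not part of the util itself; the type name below is
+hypothetical and only the parameters shown above are assumed:
+
+    $code = @'
+    namespace Ansible.Example
+    {
+        public class Util
+        {
+            public static int Add(int a, int b) { return a + b; }
+        }
+    }
+'@
+    Add-CSharpType -References @($code)
+    [Ansible.Example.Util]::Add(1, 2)   # -> 3
+#>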
+
+Export-ModuleMember -Function Add-CSharpType
+
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.ArgvParser.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.ArgvParser.psm1
new file mode 100644
index 0000000..53d6870
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.ArgvParser.psm1
@@ -0,0 +1,78 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# The rules used in these functions are derived from the below
+# https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments
+# https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
+
+Function Escape-Argument($argument, $force_quote = $false) {
+    # this converts a single argument to an escaped version, use Argv-ToString
+    # instead of this function as this only escapes a single string.
+
+ # check if argument contains a space, \n, \t, \v or "
+ if ($force_quote -eq $false -and $argument.Length -gt 0 -and $argument -notmatch "[ \n\t\v`"]") {
+ # argument does not need escaping (and we don't want to force it),
+ # return as is
+ return $argument
+ }
+ else {
+ # we need to quote the arg so start with "
+ $new_argument = '"'
+
+ for ($i = 0; $i -lt $argument.Length; $i++) {
+ $num_backslashes = 0
+
+ # get the number of \ from current char until end or not a \
+ while ($i -ne ($argument.Length - 1) -and $argument[$i] -eq "\") {
+ $num_backslashes++
+ $i++
+ }
+
+ $current_char = $argument[$i]
+ if ($i -eq ($argument.Length - 1) -and $current_char -eq "\") {
+                # We are at the end of the string so we need to double the
+                # backslashes (including this one) as the next char is the closing "
+ $new_argument += ("\" * ($num_backslashes + 1) * 2)
+ }
+ elseif ($current_char -eq '"') {
+ # we have a inline ", we need to add the existing \ but * by 2
+ # plus another 1
+ $new_argument += ("\" * (($num_backslashes * 2) + 1))
+ $new_argument += $current_char
+ }
+ else {
+ # normal character so no need to escape the \ we have counted
+ $new_argument += ("\" * $num_backslashes)
+ $new_argument += $current_char
+ }
+ }
+
+ # we need to close the special arg with a "
+ $new_argument += '"'
+ return $new_argument
+ }
+}
+
+Function Argv-ToString($arguments, $force_quote = $false) {
+    # Takes in a list of unescaped arguments and converts it to a single string
+    # that can be used when starting a new process. It will escape the
+    # characters as necessary in the list.
+    # While there is a CommandLineToArgvW function, there is no
+    # ArgvToCommandLineW that we can call to convert a list to an escaped
+    # string.
+    # You can also pass in force_quote so that each argument is quoted even
+    # when not necessary; by default only arguments with certain characters are
+    # quoted.
+ # TODO: add in another switch which will escape the args for cmd.exe
+
+ $escaped_arguments = @()
+ foreach ($argument in $arguments) {
+ $escaped_argument = Escape-Argument -argument $argument -force_quote $force_quote
+ $escaped_arguments += $escaped_argument
+ }
+
+ return ($escaped_arguments -join ' ')
+}
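+
+<#
+Illustrative sketch of the escaping rules above (hypothetical arguments):
+
+    Argv-ToString -arguments @('C:\Program Files\app.exe', '/level:2', 'say "hi"')
+    # -> "C:\Program Files\app.exe" /level:2 "say \"hi\""
+#>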
+
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Alias * -Function * -Cmdlet *
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Backup.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Backup.psm1
new file mode 100644
index 0000000..ca4f5ba
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Backup.psm1
@@ -0,0 +1,34 @@
+# Copyright (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+Function Backup-File {
+ <#
+ .SYNOPSIS
+ Helper function to make a backup of a file.
+ .EXAMPLE
+ Backup-File -path $path -WhatIf:$check_mode
+#>
+ [CmdletBinding(SupportsShouldProcess = $true)]
+
+ Param (
+ [Parameter(Mandatory = $true, ValueFromPipeline = $true)]
+ [string] $path
+ )
+
+ Process {
+ $backup_path = $null
+ if (Test-Path -LiteralPath $path -PathType Leaf) {
+ $backup_path = "$path.$pid." + [DateTime]::Now.ToString("yyyyMMdd-HHmmss") + ".bak";
+ Try {
+ Copy-Item -LiteralPath $path -Destination $backup_path
+ }
+ Catch {
+ throw "Failed to create backup file '$backup_path' from '$path'. ($($_.Exception.Message))"
+ }
+ }
+ return $backup_path
+ }
+}
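+
+<#
+Illustrative sketch (hypothetical path; returns $null when the file does not exist):
+
+    $backup_path = Backup-File -path C:\temp\app.conf -WhatIf:$check_mode
+    # e.g. C:\temp\app.conf.4512.20230101-093000.bak
+#>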
+
+# This line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Function Backup-File
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
new file mode 100644
index 0000000..9b86f84
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
@@ -0,0 +1,69 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# used by Convert-DictToSnakeCase to convert a string in camelCase
+# format to snake_case
+Function Convert-StringToSnakeCase($string) {
+    # cope with pluralized abbreviations such as TargetGroupARNs
+ if ($string -cmatch "[A-Z]{3,}s") {
+ $replacement_string = $string -creplace $matches[0], "_$($matches[0].ToLower())"
+
+ # handle when there was nothing before the plural pattern
+ if ($replacement_string.StartsWith("_") -and -not $string.StartsWith("_")) {
+ $replacement_string = $replacement_string.Substring(1)
+ }
+ $string = $replacement_string
+ }
+ $string = $string -creplace "(.)([A-Z][a-z]+)", '$1_$2'
+ $string = $string -creplace "([a-z0-9])([A-Z])", '$1_$2'
+ $string = $string.ToLower()
+
+ return $string
+}
+
+# used by Convert-DictToSnakeCase to convert list entries from camelCase
+# to snake_case
+Function Convert-ListToSnakeCase($list) {
+ $snake_list = [System.Collections.ArrayList]@()
+ foreach ($value in $list) {
+ if ($value -is [Hashtable]) {
+ $new_value = Convert-DictToSnakeCase -dict $value
+ }
+ elseif ($value -is [Array] -or $value -is [System.Collections.ArrayList]) {
+ $new_value = Convert-ListToSnakeCase -list $value
+ }
+ else {
+ $new_value = $value
+ }
+ [void]$snake_list.Add($new_value)
+ }
+
+ return , $snake_list
+}
+
+# converts a dict/hashtable keys from camelCase to snake_case
+# this is to keep the return values consistent with the Ansible
+# way of working.
+Function Convert-DictToSnakeCase($dict) {
+ $snake_dict = @{}
+ foreach ($dict_entry in $dict.GetEnumerator()) {
+ $key = $dict_entry.Key
+ $snake_key = Convert-StringToSnakeCase -string $key
+
+ $value = $dict_entry.Value
+ if ($value -is [Hashtable]) {
+ $snake_dict.$snake_key = Convert-DictToSnakeCase -dict $value
+ }
+ elseif ($value -is [Array] -or $value -is [System.Collections.ArrayList]) {
+ $snake_dict.$snake_key = Convert-ListToSnakeCase -list $value
+ }
+ else {
+ $snake_dict.$snake_key = $value
+ }
+ }
+
+ return , $snake_dict
+}
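+
+<#
+Illustrative sketch of the conversion (hypothetical input):
+
+    Convert-DictToSnakeCase -dict @{ InstanceId = 'i-123'; TargetGroupARNs = @('arn:abc') }
+    # -> @{ instance_id = 'i-123'; target_group_arns = @('arn:abc') }
+#>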
+
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Alias * -Function * -Cmdlet *
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1
new file mode 100644
index 0000000..56b5d39
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1
@@ -0,0 +1,107 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+#AnsibleRequires -CSharpUtil Ansible.Process
+
+Function Get-ExecutablePath {
+ <#
+ .SYNOPSIS
+    Gets the full path to an executable; searches the directory specified or the directories in the PATH env var.
+
+ .PARAMETER executable
+ [String]The executable to search for.
+
+ .PARAMETER directory
+ [String] If set, the directory to search in.
+
+    .OUTPUTS
+    [String] The full path to the specified executable.
+ #>
+ Param(
+ [String]$executable,
+ [String]$directory = $null
+ )
+
+ # we need to add .exe if it doesn't have an extension already
+ if (-not [System.IO.Path]::HasExtension($executable)) {
+ $executable = "$($executable).exe"
+ }
+ $full_path = [System.IO.Path]::GetFullPath($executable)
+
+ if ($full_path -ne $executable -and $directory -ne $null) {
+ $file = Get-Item -LiteralPath "$directory\$executable" -Force -ErrorAction SilentlyContinue
+ }
+ else {
+ $file = Get-Item -LiteralPath $executable -Force -ErrorAction SilentlyContinue
+ }
+
+ if ($null -ne $file) {
+ $executable_path = $file.FullName
+ }
+ else {
+ $executable_path = [Ansible.Process.ProcessUtil]::SearchPath($executable)
+ }
+ return $executable_path
+}
+
+Function Run-Command {
+ <#
+ .SYNOPSIS
+ Run a command with the CreateProcess API and return the stdout/stderr and return code.
+
+ .PARAMETER command
+ The full command, including the executable, to run.
+
+ .PARAMETER working_directory
+    The working directory to set on the new process; defaults to the current working dir.
+
+ .PARAMETER stdin
+    A string to send over the stdin pipe to the new process.
+
+ .PARAMETER environment
+    A hashtable of environment variables to set for the new process. If set, it will replace all other env vars.
+
+ .PARAMETER output_encoding_override
+ The character encoding name for decoding stdout/stderr output of the process.
+
+    .OUTPUTS
+ [Hashtable]
+ [String]executable - The full path to the executable that was run
+ [String]stdout - The stdout stream of the process
+ [String]stderr - The stderr stream of the process
+ [Int32]rc - The return code of the process
+ #>
+ Param(
+ [string]$command,
+ [string]$working_directory = $null,
+ [string]$stdin = "",
+ [hashtable]$environment = @{},
+ [string]$output_encoding_override = $null
+ )
+
+ # need to validate the working directory if it is set
+ if ($working_directory) {
+ # validate working directory is a valid path
+ if (-not (Test-Path -LiteralPath $working_directory)) {
+ throw "invalid working directory path '$working_directory'"
+ }
+ }
+
+ # lpApplicationName needs to be the full path to an executable, we do this
+ # by getting the executable as the first arg and then getting the full path
+ $arguments = [Ansible.Process.ProcessUtil]::ParseCommandLine($command)
+ $executable = Get-ExecutablePath -executable $arguments[0] -directory $working_directory
+
+ # run the command and get the results
+ $command_result = [Ansible.Process.ProcessUtil]::CreateProcess($executable, $command, $working_directory, $environment, $stdin, $output_encoding_override)
+
+ return , @{
+ executable = $executable
+ stdout = $command_result.StandardOut
+ stderr = $command_result.StandardError
+ rc = $command_result.ExitCode
+ }
+}
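+
+<#
+Illustrative sketch (assumes the Ansible.Process C# util has been loaded):
+
+    $result = Run-Command -command 'cmd.exe /c echo hello'
+    # $result.rc -> 0, $result.stdout -> "hello`r`n",
+    # $result.executable -> the resolved full path to cmd.exe
+#>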
+
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Function Get-ExecutablePath, Run-Command
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1
new file mode 100644
index 0000000..cd614d4
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1
@@ -0,0 +1,66 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+<#
+Test-Path/Get-Item cannot find/return info on files that are locked like
+C:\pagefile.sys. These 2 functions are designed to work with these files and
+provide functionality similar to the normal cmdlets with as little overhead as
+possible. They work by querying the raw file attributes through
+[System.IO.File]::GetAttributes and returning the result from that.
+#>
+
+Function Test-AnsiblePath {
+ [CmdletBinding()]
+ Param(
+ [Parameter(Mandatory = $true)][string]$Path
+ )
+ # Replacement for Test-Path
+ try {
+ $file_attributes = [System.IO.File]::GetAttributes($Path)
+ }
+ catch [System.IO.FileNotFoundException], [System.IO.DirectoryNotFoundException] {
+ return $false
+ }
+ catch [NotSupportedException] {
+        # When testing a path like Cert:\LocalMachine\My, System.IO.File will
+        # not work, so we just fall back to using Test-Path for this
+ return Test-Path -Path $Path
+ }
+
+ if ([Int32]$file_attributes -eq -1) {
+ return $false
+ }
+ else {
+ return $true
+ }
+}
+
+Function Get-AnsibleItem {
+ [CmdletBinding()]
+ Param(
+ [Parameter(Mandatory = $true)][string]$Path
+ )
+ # Replacement for Get-Item
+ try {
+ $file_attributes = [System.IO.File]::GetAttributes($Path)
+ }
+ catch {
+        # if -ErrorAction SilentlyContinue is set on the cmdlet and we failed to
+        # get the attributes, just return $null, otherwise throw the error
+ if ($ErrorActionPreference -ne "SilentlyContinue") {
+ throw $_
+ }
+ return $null
+ }
+ if ([Int32]$file_attributes -eq -1) {
+ throw New-Object -TypeName System.Management.Automation.ItemNotFoundException -ArgumentList "Cannot find path '$Path' because it does not exist."
+ }
+ elseif ($file_attributes.HasFlag([System.IO.FileAttributes]::Directory)) {
+ return New-Object -TypeName System.IO.DirectoryInfo -ArgumentList $Path
+ }
+ else {
+ return New-Object -TypeName System.IO.FileInfo -ArgumentList $Path
+ }
+}
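+
+<#
+Illustrative sketch; C:\pagefile.sys is the classic locked-file case these
+functions exist for:
+
+    Test-AnsiblePath -Path C:\pagefile.sys   # $true, where Test-Path returns $false
+    Get-AnsibleItem -Path C:\Windows         # a System.IO.DirectoryInfo object
+#>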
+
+Export-ModuleMember -Function Test-AnsiblePath, Get-AnsibleItem
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1
new file mode 100644
index 0000000..f0cb440
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1
@@ -0,0 +1,390 @@
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2014, and others
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+Set-StrictMode -Version 2.0
+$ErrorActionPreference = "Stop"
+
+Function Set-Attr($obj, $name, $value) {
+ <#
+ .SYNOPSIS
+ Helper function to set an "attribute" on a psobject instance in PowerShell.
+ This is a convenience to make adding Members to the object easier and
+ slightly more pythonic
+ .EXAMPLE
+ Set-Attr $result "changed" $true
+#>
+
+ # If the provided $obj is undefined, define one to be nice
+ If (-not $obj.GetType) {
+ $obj = @{ }
+ }
+
+ Try {
+ $obj.$name = $value
+ }
+ Catch {
+ $obj | Add-Member -Force -MemberType NoteProperty -Name $name -Value $value
+ }
+}
+
+Function Exit-Json($obj) {
+ <#
+ .SYNOPSIS
+ Helper function to convert a PowerShell object to JSON and output it, exiting
+ the script
+ .EXAMPLE
+ Exit-Json $result
+#>
+
+ # If the provided $obj is undefined, define one to be nice
+ If (-not $obj.GetType) {
+ $obj = @{ }
+ }
+
+ if (-not $obj.ContainsKey('changed')) {
+ Set-Attr -obj $obj -name "changed" -value $false
+ }
+
+ Write-Output $obj | ConvertTo-Json -Compress -Depth 99
+ Exit
+}
+
+Function Fail-Json($obj, $message = $null) {
+ <#
+ .SYNOPSIS
+ Helper function to add the "msg" property and "failed" property, convert the
+ PowerShell Hashtable to JSON and output it, exiting the script
+ .EXAMPLE
+ Fail-Json $result "This is the failure message"
+#>
+
+ if ($obj -is [hashtable] -or $obj -is [psobject]) {
+ # Nothing to do
+ }
+ elseif ($obj -is [string] -and $null -eq $message) {
+ # If we weren't given 2 args, and the only arg was a string,
+ # create a new Hashtable and use the arg as the failure message
+ $message = $obj
+ $obj = @{ }
+ }
+ else {
+ # If the first argument is undefined or a different type,
+ # make it a Hashtable
+ $obj = @{ }
+ }
+
+ # Still using Set-Attr for PSObject compatibility
+ Set-Attr -obj $obj -name "msg" -value $message
+ Set-Attr -obj $obj -name "failed" -value $true
+
+ if (-not $obj.ContainsKey('changed')) {
+ Set-Attr -obj $obj -name "changed" -value $false
+ }
+
+ Write-Output $obj | ConvertTo-Json -Compress -Depth 99
+ Exit 1
+}
+
+Function Add-Warning($obj, $message) {
+ <#
+ .SYNOPSIS
+ Helper function to add warnings, even if the warnings attribute was
+ not already set up. This is a convenience for the module developer
+ so they do not have to check for the attribute prior to adding.
+#>
+
+ if (-not $obj.ContainsKey("warnings")) {
+ $obj.warnings = @()
+ }
+ elseif ($obj.warnings -isnot [array]) {
+ throw "Add-Warning: warnings attribute is not an array"
+ }
+
+ $obj.warnings += $message
+}
+
+Function Add-DeprecationWarning($obj, $message, $version = $null) {
+ <#
+ .SYNOPSIS
+ Helper function to add deprecations, even if the deprecations attribute was
+ not already set up. This is a convenience for the module developer
+ so they do not have to check for the attribute prior to adding.
+#>
+ if (-not $obj.ContainsKey("deprecations")) {
+ $obj.deprecations = @()
+ }
+ elseif ($obj.deprecations -isnot [array]) {
+ throw "Add-DeprecationWarning: deprecations attribute is not a list"
+ }
+
+ $obj.deprecations += @{
+ msg = $message
+ version = $version
+ }
+}
+
+Function Expand-Environment($value) {
+ <#
+ .SYNOPSIS
+ Helper function to expand environment variables in values. By default
+ it turns any type to a string, but we ensure $null remains $null.
+#>
+ if ($null -ne $value) {
+ [System.Environment]::ExpandEnvironmentVariables($value)
+ }
+ else {
+ $value
+ }
+}
+
+Function Get-AnsibleParam {
+ <#
+ .SYNOPSIS
+ Helper function to get an "attribute" from a psobject instance in PowerShell.
+ This is a convenience to make getting Members from an object easier and
+ slightly more pythonic
+ .EXAMPLE
+ $attr = Get-AnsibleParam $response "code" -default "1"
+ .EXAMPLE
+ Get-AnsibleParam -obj $params -name "State" -default "Present" -ValidateSet "Present","Absent" -resultobj $resultobj -failifempty $true
+ Get-AnsibleParam also supports Parameter validation to save you from coding that manually
+    Note that if you use the failifempty option, you do need to specify resultobj as well.
+#>
+ param (
+ $obj,
+ $name,
+ $default = $null,
+ $resultobj = @{},
+ $failifempty = $false,
+ $emptyattributefailmessage,
+ $ValidateSet,
+ $ValidateSetErrorMessage,
+ $type = $null,
+ $aliases = @()
+ )
+ # Check if the provided Member $name or aliases exist in $obj and return it or the default.
+ try {
+
+ $found = $null
+ # First try to find preferred parameter $name
+ $aliases = @($name) + $aliases
+
+ # Iterate over aliases to find acceptable Member $name
+ foreach ($alias in $aliases) {
+ if ($obj.ContainsKey($alias)) {
+ $found = $alias
+ break
+ }
+ }
+
+ if ($null -eq $found) {
+ throw
+ }
+ $name = $found
+
+ if ($ValidateSet) {
+
+ if ($ValidateSet -contains ($obj.$name)) {
+ $value = $obj.$name
+ }
+ else {
+ if ($null -eq $ValidateSetErrorMessage) {
+                    # Auto-generated error should be sufficient in most use cases
+ $ValidateSetErrorMessage = "Get-AnsibleParam: Argument $name needs to be one of $($ValidateSet -join ",") but was $($obj.$name)."
+ }
+ Fail-Json -obj $resultobj -message $ValidateSetErrorMessage
+ }
+ }
+ else {
+ $value = $obj.$name
+ }
+ }
+ catch {
+ if ($failifempty -eq $false) {
+ $value = $default
+ }
+ else {
+ if (-not $emptyattributefailmessage) {
+ $emptyattributefailmessage = "Get-AnsibleParam: Missing required argument: $name"
+ }
+ Fail-Json -obj $resultobj -message $emptyattributefailmessage
+ }
+ }
+
+ # If $null -eq $value, the parameter was unspecified by the user (deliberately or not)
+ # Please leave $null-values intact, modules need to know if a parameter was specified
+ if ($null -eq $value) {
+ return $null
+ }
+
+ if ($type -eq "path") {
+ # Expand environment variables on path-type
+ $value = Expand-Environment($value)
+ # Test if a valid path is provided
+ if (-not (Test-Path -IsValid $value)) {
+ $path_invalid = $true
+ # could still be a valid-shaped path with a nonexistent drive letter
+ if ($value -match "^\w:") {
+            # rewrite path with a valid drive letter and recheck the shape; this might still fail, e.g. a nonexistent non-filesystem PS path
+ if (Test-Path -IsValid $(@(Get-PSDrive -PSProvider Filesystem)[0].Name + $value.Substring(1))) {
+ $path_invalid = $false
+ }
+ }
+ if ($path_invalid) {
+ Fail-Json -obj $resultobj -message "Get-AnsibleParam: Parameter '$name' has an invalid path '$value' specified."
+ }
+ }
+ }
+ elseif ($type -eq "str") {
+ # Convert str types to real Powershell strings
+ $value = $value.ToString()
+ }
+ elseif ($type -eq "bool") {
+ # Convert boolean types to real Powershell booleans
+ $value = $value | ConvertTo-Bool
+ }
+ elseif ($type -eq "int") {
+ # Convert int types to real Powershell integers
+ $value = $value -as [int]
+ }
+ elseif ($type -eq "float") {
+ # Convert float types to real Powershell floats
+ $value = $value -as [float]
+ }
+ elseif ($type -eq "list") {
+ if ($value -is [array]) {
+ # Nothing to do
+ }
+ elseif ($value -is [string]) {
+ # Convert string type to real Powershell array
+ $value = $value.Split(",").Trim()
+ }
+ elseif ($value -is [int]) {
+ $value = @($value)
+ }
+ else {
+ Fail-Json -obj $resultobj -message "Get-AnsibleParam: Parameter '$name' is not a YAML list."
+ }
+ # , is not a typo, forces it to return as a list when it is empty or only has 1 entry
+ return , $value
+ }
+
+ return $value
+}
+
+# Alias Get-attr --> Get-AnsibleParam for backwards compat. Only add when needed to ease debugging of scripts
+If (-not(Get-Alias -Name "Get-attr" -ErrorAction SilentlyContinue)) {
+ New-Alias -Name Get-attr -Value Get-AnsibleParam
+}
+
+Function ConvertTo-Bool {
+ <#
+ .SYNOPSIS
+ Helper filter/pipeline function to convert a value to boolean following current
+ Ansible practices
+ .EXAMPLE
+ $is_true = "true" | ConvertTo-Bool
+#>
+ param(
+ [parameter(valuefrompipeline = $true)]
+ $obj
+ )
+
+ process {
+ $boolean_strings = "yes", "on", "1", "true", 1
+ $obj_string = [string]$obj
+
+ if (($obj -is [boolean] -and $obj) -or $boolean_strings -contains $obj_string.ToLower()) {
+ return $true
+ }
+ else {
+ return $false
+ }
+ }
+}
+
+Function Parse-Args {
+ <#
+ .SYNOPSIS
+ Helper function to parse Ansible JSON arguments from a "file" passed as
+ the single argument to the module.
+ .EXAMPLE
+ $params = Parse-Args $args
+#>
+ [Diagnostics.CodeAnalysis.SuppressMessageAttribute("PSUseSingularNouns", "", Justification = "Cannot change the name now")]
+ param ($arguments, $supports_check_mode = $false)
+
+ $params = New-Object psobject
+ If ($arguments.Length -gt 0) {
+ $params = Get-Content $arguments[0] | ConvertFrom-Json
+ }
+ Else {
+ $params = $complex_args
+ }
+ $check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false
+ If ($check_mode -and -not $supports_check_mode) {
+ Exit-Json @{
+ skipped = $true
+ changed = $false
+ msg = "remote module does not support check mode"
+ }
+ }
+ return $params
+}
+
+
+Function Get-FileChecksum($path, $algorithm = 'sha1') {
+ <#
+ .SYNOPSIS
+ Helper function to calculate a hash of a file in a way which PowerShell 3
+ and above can handle
+#>
+ If (Test-Path -LiteralPath $path -PathType Leaf) {
+ switch ($algorithm) {
+ 'md5' { $sp = New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider }
+ 'sha1' { $sp = New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider }
+ 'sha256' { $sp = New-Object -TypeName System.Security.Cryptography.SHA256CryptoServiceProvider }
+ 'sha384' { $sp = New-Object -TypeName System.Security.Cryptography.SHA384CryptoServiceProvider }
+ 'sha512' { $sp = New-Object -TypeName System.Security.Cryptography.SHA512CryptoServiceProvider }
+ default { Fail-Json @{} "Unsupported hash algorithm supplied '$algorithm'" }
+ }
+
+ If ($PSVersionTable.PSVersion.Major -ge 4) {
+ $raw_hash = Get-FileHash -LiteralPath $path -Algorithm $algorithm
+ $hash = $raw_hash.Hash.ToLower()
+ }
+ Else {
+ $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read, [System.IO.FileShare]::ReadWrite);
+ $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
+ $fp.Dispose();
+ }
+ }
+ ElseIf (Test-Path -LiteralPath $path -PathType Container) {
+ $hash = "3";
+ }
+ Else {
+ $hash = "1";
+ }
+ return $hash
+}
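+
+<#
+Illustrative sketch (hypothetical path):
+
+    Get-FileChecksum -path C:\temp\app.conf -algorithm sha256
+    # -> the lowercase hex digest, "3" for a directory or "1" if the path is missing
+#>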
+
+Function Get-PendingRebootStatus {
+ <#
+ .SYNOPSIS
+    Check if a reboot is required and, if so, notify CA.
+    Function returns $true if the computer has a pending reboot
+#>
+ $featureData = Invoke-CimMethod -EA Ignore -Name GetServerFeature -Namespace root\microsoft\windows\servermanager -Class MSFT_ServerManagerTasks
+ $regData = Get-ItemProperty "HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager" "PendingFileRenameOperations" -EA Ignore
+ $CBSRebootStatus = Get-ChildItem "HKLM:\\SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing" -ErrorAction SilentlyContinue |
+ Where-Object { $_.PSChildName -eq "RebootPending" }
+ if (($featureData -and $featureData.RequiresReboot) -or $regData -or $CBSRebootStatus) {
+ return $True
+ }
+ else {
+ return $False
+ }
+}
+
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Alias * -Function * -Cmdlet *
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1
new file mode 100644
index 0000000..1a251f6
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1
@@ -0,0 +1,464 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+#Requires -Module Ansible.ModuleUtils.PrivilegeUtil
+
+Function Load-LinkUtils {
+ [Diagnostics.CodeAnalysis.SuppressMessageAttribute("PSUseSingularNouns", "", Justification = "Cannot change the name now")]
+ param ()
+
+ $link_util = @'
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Runtime.InteropServices;
+using System.Text;
+
+namespace Ansible
+{
+ public enum LinkType
+ {
+ SymbolicLink,
+ JunctionPoint,
+ HardLink
+ }
+
+ public class LinkUtilWin32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+
+ public LinkUtilWin32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+
+ public LinkUtilWin32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode);
+ }
+
+ public override string Message { get { return _msg; } }
+ public static explicit operator LinkUtilWin32Exception(string message) { return new LinkUtilWin32Exception(message); }
+ }
+
+ public class LinkInfo
+ {
+ public LinkType Type { get; internal set; }
+ public string PrintName { get; internal set; }
+ public string SubstituteName { get; internal set; }
+ public string AbsolutePath { get; internal set; }
+ public string TargetPath { get; internal set; }
+ public string[] HardTargets { get; internal set; }
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct REPARSE_DATA_BUFFER
+ {
+ public UInt32 ReparseTag;
+ public UInt16 ReparseDataLength;
+ public UInt16 Reserved;
+ public UInt16 SubstituteNameOffset;
+ public UInt16 SubstituteNameLength;
+ public UInt16 PrintNameOffset;
+ public UInt16 PrintNameLength;
+
+ [MarshalAs(UnmanagedType.ByValArray, SizeConst = LinkUtil.MAXIMUM_REPARSE_DATA_BUFFER_SIZE)]
+ public char[] PathBuffer;
+ }
+
+ public class LinkUtil
+ {
+ public const int MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 1024 * 16;
+
+ private const UInt32 FILE_FLAG_BACKUP_SEMANTICS = 0x02000000;
+ private const UInt32 FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000;
+
+ private const UInt32 FSCTL_GET_REPARSE_POINT = 0x000900A8;
+ private const UInt32 FSCTL_SET_REPARSE_POINT = 0x000900A4;
+ private const UInt32 FILE_DEVICE_FILE_SYSTEM = 0x00090000;
+
+ private const UInt32 IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003;
+ private const UInt32 IO_REPARSE_TAG_SYMLINK = 0xA000000C;
+
+ private const UInt32 SYMLINK_FLAG_RELATIVE = 0x00000001;
+
+ private const Int64 INVALID_HANDLE_VALUE = -1;
+
+ private const UInt32 SIZE_OF_WCHAR = 2;
+
+ private const UInt32 SYMBOLIC_LINK_FLAG_FILE = 0x00000000;
+ private const UInt32 SYMBOLIC_LINK_FLAG_DIRECTORY = 0x00000001;
+
+ [DllImport("kernel32.dll", CharSet = CharSet.Auto)]
+ private static extern SafeFileHandle CreateFile(
+ string lpFileName,
+ [MarshalAs(UnmanagedType.U4)] FileAccess dwDesiredAccess,
+ [MarshalAs(UnmanagedType.U4)] FileShare dwShareMode,
+ IntPtr lpSecurityAttributes,
+ [MarshalAs(UnmanagedType.U4)] FileMode dwCreationDisposition,
+ UInt32 dwFlagsAndAttributes,
+ IntPtr hTemplateFile);
+
+ // Used by GetReparsePointInfo()
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool DeviceIoControl(
+ SafeFileHandle hDevice,
+ UInt32 dwIoControlCode,
+ IntPtr lpInBuffer,
+ UInt32 nInBufferSize,
+ out REPARSE_DATA_BUFFER lpOutBuffer,
+ UInt32 nOutBufferSize,
+ out UInt32 lpBytesReturned,
+ IntPtr lpOverlapped);
+
+ // Used by CreateJunctionPoint()
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool DeviceIoControl(
+ SafeFileHandle hDevice,
+ UInt32 dwIoControlCode,
+ REPARSE_DATA_BUFFER lpInBuffer,
+ UInt32 nInBufferSize,
+ IntPtr lpOutBuffer,
+ UInt32 nOutBufferSize,
+ out UInt32 lpBytesReturned,
+ IntPtr lpOverlapped);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool GetVolumePathName(
+ string lpszFileName,
+ StringBuilder lpszVolumePathName,
+ ref UInt32 cchBufferLength);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern IntPtr FindFirstFileNameW(
+ string lpFileName,
+ UInt32 dwFlags,
+ ref UInt32 StringLength,
+ StringBuilder LinkName);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool FindNextFileNameW(
+ IntPtr hFindStream,
+ ref UInt32 StringLength,
+ StringBuilder LinkName);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern bool FindClose(
+ IntPtr hFindFile);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool RemoveDirectory(
+ string lpPathName);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool DeleteFile(
+ string lpFileName);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool CreateSymbolicLink(
+ string lpSymlinkFileName,
+ string lpTargetFileName,
+ UInt32 dwFlags);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool CreateHardLink(
+ string lpFileName,
+ string lpExistingFileName,
+ IntPtr lpSecurityAttributes);
+
+ public static LinkInfo GetLinkInfo(string linkPath)
+ {
+ FileAttributes attr = File.GetAttributes(linkPath);
+ if (attr.HasFlag(FileAttributes.ReparsePoint))
+ return GetReparsePointInfo(linkPath);
+
+ if (!attr.HasFlag(FileAttributes.Directory))
+ return GetHardLinkInfo(linkPath);
+
+ return null;
+ }
+
+ public static void DeleteLink(string linkPath)
+ {
+ bool success;
+ FileAttributes attr = File.GetAttributes(linkPath);
+ if (attr.HasFlag(FileAttributes.Directory))
+ {
+ success = RemoveDirectory(linkPath);
+ }
+ else
+ {
+ success = DeleteFile(linkPath);
+ }
+
+ if (!success)
+ throw new LinkUtilWin32Exception(String.Format("Failed to delete link at {0}", linkPath));
+ }
+
+ public static void CreateLink(string linkPath, String linkTarget, LinkType linkType)
+ {
+ switch (linkType)
+ {
+ case LinkType.SymbolicLink:
+ UInt32 linkFlags;
+ FileAttributes attr = File.GetAttributes(linkTarget);
+ if (attr.HasFlag(FileAttributes.Directory))
+ linkFlags = SYMBOLIC_LINK_FLAG_DIRECTORY;
+ else
+ linkFlags = SYMBOLIC_LINK_FLAG_FILE;
+
+ if (!CreateSymbolicLink(linkPath, linkTarget, linkFlags))
+ throw new LinkUtilWin32Exception(String.Format("CreateSymbolicLink({0}, {1}, {2}) failed", linkPath, linkTarget, linkFlags));
+ break;
+ case LinkType.JunctionPoint:
+ CreateJunctionPoint(linkPath, linkTarget);
+ break;
+ case LinkType.HardLink:
+ if (!CreateHardLink(linkPath, linkTarget, IntPtr.Zero))
+ throw new LinkUtilWin32Exception(String.Format("CreateHardLink({0}, {1}) failed", linkPath, linkTarget));
+ break;
+ }
+ }
+
+ private static LinkInfo GetHardLinkInfo(string linkPath)
+ {
+ UInt32 maxPath = 260;
+ List<string> result = new List<string>();
+
+ StringBuilder sb = new StringBuilder((int)maxPath);
+ UInt32 stringLength = maxPath;
+ if (!GetVolumePathName(linkPath, sb, ref stringLength))
+ throw new LinkUtilWin32Exception("GetVolumePathName() failed");
+ string volume = sb.ToString();
+
+ stringLength = maxPath;
+ IntPtr findHandle = FindFirstFileNameW(linkPath, 0, ref stringLength, sb);
+ if (findHandle.ToInt64() != INVALID_HANDLE_VALUE)
+ {
+ try
+ {
+ do
+ {
+ string hardLinkPath = sb.ToString();
+ if (hardLinkPath.StartsWith("\\"))
+ hardLinkPath = hardLinkPath.Substring(1, hardLinkPath.Length - 1);
+
+ result.Add(Path.Combine(volume, hardLinkPath));
+ stringLength = maxPath;
+
+ } while (FindNextFileNameW(findHandle, ref stringLength, sb));
+ }
+ finally
+ {
+ FindClose(findHandle);
+ }
+ }
+
+ if (result.Count > 1)
+ return new LinkInfo
+ {
+ Type = LinkType.HardLink,
+ HardTargets = result.ToArray()
+ };
+
+ return null;
+ }
+
+ private static LinkInfo GetReparsePointInfo(string linkPath)
+ {
+ SafeFileHandle fileHandle = CreateFile(
+ linkPath,
+ FileAccess.Read,
+ FileShare.None,
+ IntPtr.Zero,
+ FileMode.Open,
+ FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS,
+ IntPtr.Zero);
+
+ if (fileHandle.IsInvalid)
+ throw new LinkUtilWin32Exception(String.Format("CreateFile({0}) failed", linkPath));
+
+ REPARSE_DATA_BUFFER buffer = new REPARSE_DATA_BUFFER();
+ UInt32 bytesReturned;
+ try
+ {
+ if (!DeviceIoControl(
+ fileHandle,
+ FSCTL_GET_REPARSE_POINT,
+ IntPtr.Zero,
+ 0,
+ out buffer,
+ MAXIMUM_REPARSE_DATA_BUFFER_SIZE,
+ out bytesReturned,
+ IntPtr.Zero))
+ throw new LinkUtilWin32Exception(String.Format("DeviceIoControl() failed for file at {0}", linkPath));
+ }
+ finally
+ {
+ fileHandle.Dispose();
+ }
+
+ bool isRelative = false;
+ int pathOffset = 0;
+ LinkType linkType;
+ if (buffer.ReparseTag == IO_REPARSE_TAG_SYMLINK)
+ {
+ UInt32 bufferFlags = Convert.ToUInt32(buffer.PathBuffer[0]) + Convert.ToUInt32(buffer.PathBuffer[1]);
+ if (bufferFlags == SYMLINK_FLAG_RELATIVE)
+ isRelative = true;
+ pathOffset = 2;
+ linkType = LinkType.SymbolicLink;
+ }
+ else if (buffer.ReparseTag == IO_REPARSE_TAG_MOUNT_POINT)
+ {
+ linkType = LinkType.JunctionPoint;
+ }
+ else
+ {
+ string errorMessage = String.Format("Invalid Reparse Tag: {0}", buffer.ReparseTag.ToString());
+ throw new Exception(errorMessage);
+ }
+
+ string printName = new string(buffer.PathBuffer,
+ (int)(buffer.PrintNameOffset / SIZE_OF_WCHAR) + pathOffset,
+ (int)(buffer.PrintNameLength / SIZE_OF_WCHAR));
+ string substituteName = new string(buffer.PathBuffer,
+ (int)(buffer.SubstituteNameOffset / SIZE_OF_WCHAR) + pathOffset,
+ (int)(buffer.SubstituteNameLength / SIZE_OF_WCHAR));
+
+            // TODO: should we check for \??\UNC\server and convert it to the NT style \\server path
+            // Remove the leading Windows object directory \??\ from the path if present
+ string targetPath = substituteName;
+ if (targetPath.StartsWith("\\??\\"))
+ targetPath = targetPath.Substring(4, targetPath.Length - 4);
+
+ string absolutePath = targetPath;
+ if (isRelative)
+ absolutePath = Path.GetFullPath(Path.Combine(new FileInfo(linkPath).Directory.FullName, targetPath));
+
+ return new LinkInfo
+ {
+ Type = linkType,
+ PrintName = printName,
+ SubstituteName = substituteName,
+ AbsolutePath = absolutePath,
+ TargetPath = targetPath
+ };
+ }
+
+ private static void CreateJunctionPoint(string linkPath, string linkTarget)
+ {
+ // We need to create the link as a dir beforehand
+ Directory.CreateDirectory(linkPath);
+ SafeFileHandle fileHandle = CreateFile(
+ linkPath,
+ FileAccess.Write,
+ FileShare.Read | FileShare.Write | FileShare.None,
+ IntPtr.Zero,
+ FileMode.Open,
+ FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
+ IntPtr.Zero);
+
+ if (fileHandle.IsInvalid)
+ throw new LinkUtilWin32Exception(String.Format("CreateFile({0}) failed", linkPath));
+
+ try
+ {
+ string substituteName = "\\??\\" + Path.GetFullPath(linkTarget);
+ string printName = linkTarget;
+
+ REPARSE_DATA_BUFFER buffer = new REPARSE_DATA_BUFFER();
+ buffer.SubstituteNameOffset = 0;
+ buffer.SubstituteNameLength = (UInt16)(substituteName.Length * SIZE_OF_WCHAR);
+ buffer.PrintNameOffset = (UInt16)(buffer.SubstituteNameLength + 2);
+ buffer.PrintNameLength = (UInt16)(printName.Length * SIZE_OF_WCHAR);
+
+ buffer.ReparseTag = IO_REPARSE_TAG_MOUNT_POINT;
+ buffer.ReparseDataLength = (UInt16)(buffer.SubstituteNameLength + buffer.PrintNameLength + 12);
+ buffer.PathBuffer = new char[MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+
+ byte[] unicodeBytes = Encoding.Unicode.GetBytes(substituteName + "\0" + printName);
+ char[] pathBuffer = Encoding.Unicode.GetChars(unicodeBytes);
+ Array.Copy(pathBuffer, buffer.PathBuffer, pathBuffer.Length);
+
+ UInt32 bytesReturned;
+ if (!DeviceIoControl(
+ fileHandle,
+ FSCTL_SET_REPARSE_POINT,
+ buffer,
+ (UInt32)(buffer.ReparseDataLength + 8),
+ IntPtr.Zero, 0,
+ out bytesReturned,
+ IntPtr.Zero))
+ throw new LinkUtilWin32Exception(String.Format("DeviceIoControl() failed to create junction point at {0} to {1}", linkPath, linkTarget));
+ }
+ finally
+ {
+ fileHandle.Dispose();
+ }
+ }
+ }
+}
+'@
+
+ # FUTURE: find a better way to get the _ansible_remote_tmp variable
+ $original_tmp = $env:TMP
+ $original_lib = $env:LIB
+
+ $remote_tmp = $original_tmp
+ $module_params = Get-Variable -Name complex_args -ErrorAction SilentlyContinue
+ if ($module_params) {
+ if ($module_params.Value.ContainsKey("_ansible_remote_tmp") ) {
+ $remote_tmp = $module_params.Value["_ansible_remote_tmp"]
+ $remote_tmp = [System.Environment]::ExpandEnvironmentVariables($remote_tmp)
+ }
+ }
+
+ $env:TMP = $remote_tmp
+ $env:LIB = $null
+ Add-Type -TypeDefinition $link_util
+ $env:TMP = $original_tmp
+ $env:LIB = $original_lib
+
+ # enable the SeBackupPrivilege if it is disabled
+ $state = Get-AnsiblePrivilege -Name SeBackupPrivilege
+ if ($state -eq $false) {
+ Set-AnsiblePrivilege -Name SeBackupPrivilege -Value $true
+ }
+}
+
+Function Get-Link($link_path) {
+ $link_info = [Ansible.LinkUtil]::GetLinkInfo($link_path)
+ return $link_info
+}
+
+Function Remove-Link($link_path) {
+ [Ansible.LinkUtil]::DeleteLink($link_path)
+}
+
+Function New-Link($link_path, $link_target, $link_type) {
+ if (-not (Test-Path -LiteralPath $link_target)) {
+ throw "link_target '$link_target' does not exist, cannot create link"
+ }
+
+ switch ($link_type) {
+ "link" {
+ $type = [Ansible.LinkType]::SymbolicLink
+ }
+ "junction" {
+ if (Test-Path -LiteralPath $link_target -PathType Leaf) {
+ throw "cannot set the target for a junction point to a file"
+ }
+ $type = [Ansible.LinkType]::JunctionPoint
+ }
+ "hard" {
+ if (Test-Path -LiteralPath $link_target -PathType Container) {
+ throw "cannot set the target for a hard link to a directory"
+ }
+ $type = [Ansible.LinkType]::HardLink
+ }
+ default { throw "invalid link_type option $($link_type): expecting link, junction, hard" }
+ }
+ [Ansible.LinkUtil]::CreateLink($link_path, $link_target, $type)
+}
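+
+<#
+Illustrative sketch (hypothetical paths; Load-LinkUtils must run first to
+compile the Ansible.LinkUtil helper above):
+
+    Load-LinkUtils
+    New-Link -link_path C:\temp\my-junction -link_target C:\temp\target -link_type junction
+    (Get-Link -link_path C:\temp\my-junction).Type   # -> JunctionPoint
+    Remove-Link -link_path C:\temp\my-junction
+#>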
+
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Alias * -Function * -Cmdlet *
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.PrivilegeUtil.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.PrivilegeUtil.psm1
new file mode 100644
index 0000000..78f0d64
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.PrivilegeUtil.psm1
@@ -0,0 +1,83 @@
+# Copyright (c) 2018 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+#AnsibleRequires -CSharpUtil Ansible.Privilege
+
+Function Get-AnsiblePrivilege {
+ <#
+ .SYNOPSIS
+ Get the status of a privilege for the current process. This returns
+ $true - the privilege is enabled
+ $false - the privilege is disabled
+ $null - the privilege is removed from the token
+
+ If Name is not a valid privilege name, this will throw an
+ ArgumentException.
+
+ .EXAMPLE
+ Get-AnsiblePrivilege -Name SeDebugPrivilege
+ #>
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory = $true)][String]$Name
+ )
+
+ if (-not [Ansible.Privilege.PrivilegeUtil]::CheckPrivilegeName($Name)) {
+ throw [System.ArgumentException] "Invalid privilege name '$Name'"
+ }
+
+ $process_token = [Ansible.Privilege.PrivilegeUtil]::GetCurrentProcess()
+ $privilege_info = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process_token)
+ if ($privilege_info.ContainsKey($Name)) {
+ $status = $privilege_info.$Name
+ return $status.HasFlag([Ansible.Privilege.PrivilegeAttributes]::Enabled)
+ }
+ else {
+ return $null
+ }
+}
+
+Function Set-AnsiblePrivilege {
+ <#
+ .SYNOPSIS
+ Enables/Disables a privilege on the current process' token. If a privilege
+ has been removed from the process token, this will throw an
+ InvalidOperationException.
+
+ .EXAMPLE
+ # enable a privilege
+ Set-AnsiblePrivilege -Name SeCreateSymbolicLinkPrivilege -Value $true
+
+ # disable a privilege
+ Set-AnsiblePrivilege -Name SeCreateSymbolicLinkPrivilege -Value $false
+ #>
+ [CmdletBinding(SupportsShouldProcess)]
+ param(
+ [Parameter(Mandatory = $true)][String]$Name,
+ [Parameter(Mandatory = $true)][bool]$Value
+ )
+
+ $action = switch ($Value) {
+ $true { "Enable" }
+ $false { "Disable" }
+ }
+
+ $current_state = Get-AnsiblePrivilege -Name $Name
+ if ($current_state -eq $Value) {
+ return # no change needs to occur
+ }
+ elseif ($null -eq $current_state) {
+ # once a privilege is removed from a token we cannot do anything with it
+ throw [System.InvalidOperationException] "Cannot $($action.ToLower()) the privilege '$Name' as it has been removed from the token"
+ }
+
+ $process_token = [Ansible.Privilege.PrivilegeUtil]::GetCurrentProcess()
+ if ($PSCmdlet.ShouldProcess($Name, "$action the privilege $Name")) {
+ $new_state = New-Object -TypeName 'System.Collections.Generic.Dictionary`2[[System.String], [System.Nullable`1[System.Boolean]]]'
+ $new_state.Add($Name, $Value)
+ [Ansible.Privilege.PrivilegeUtil]::SetTokenPrivileges($process_token, $new_state) > $null
+ }
+}
+
+Export-ModuleMember -Function Get-AnsiblePrivilege, Set-AnsiblePrivilege
+
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.SID.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.SID.psm1
new file mode 100644
index 0000000..d1f4b62
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.SID.psm1
@@ -0,0 +1,99 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+Function Convert-FromSID($sid) {
+ # Converts a SID to a Down-Level Logon name in the form of DOMAIN\UserName
+ # If the SID is for a local user or group then DOMAIN would be the server
+ # name.
+
+ $account_object = New-Object System.Security.Principal.SecurityIdentifier($sid)
+ try {
+ $nt_account = $account_object.Translate([System.Security.Principal.NTAccount])
+ }
+ catch {
+ Fail-Json -obj @{} -message "failed to convert sid '$sid' to a logon name: $($_.Exception.Message)"
+ }
+
+ return $nt_account.Value
+}
+
+Function Convert-ToSID {
+ [Diagnostics.CodeAnalysis.SuppressMessageAttribute("PSAvoidUsingEmptyCatchBlock", "",
+ Justification = "We don't care if converting to a SID fails, just that it failed or not")]
+ param($account_name)
+    # Converts an account name to a SID; it can take the following forms
+ # SID: Will just return the SID value that was passed in
+ # UPN:
+ # principal@domain (Domain users only)
+ # Down-Level Login Name
+ # DOMAIN\principal (Domain)
+ # SERVERNAME\principal (Local)
+ # .\principal (Local)
+ # NT AUTHORITY\SYSTEM (Local Service Accounts)
+ # Login Name
+ # principal (Local/Local Service Accounts)
+
+ try {
+ $sid = New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList $account_name
+ return $sid.Value
+ }
+ catch {}
+
+ if ($account_name -like "*\*") {
+ $account_name_split = $account_name -split "\\"
+ if ($account_name_split[0] -eq ".") {
+ $domain = $env:COMPUTERNAME
+ }
+ else {
+ $domain = $account_name_split[0]
+ }
+ $username = $account_name_split[1]
+ }
+ else {
+ $domain = $null
+ $username = $account_name
+ }
+
+ if ($domain) {
+        # searching for a local group with the server name prefixed will fail,
+        # so we need to check for this situation and only use NTAccount(String)
+ if ($domain -eq $env:COMPUTERNAME) {
+ $adsi = [ADSI]("WinNT://$env:COMPUTERNAME,computer")
+ $group = $adsi.psbase.children | Where-Object { $_.schemaClassName -eq "group" -and $_.Name -eq $username }
+ }
+ else {
+ $group = $null
+ }
+ if ($group) {
+ $account = New-Object System.Security.Principal.NTAccount($username)
+ }
+ else {
+ $account = New-Object System.Security.Principal.NTAccount($domain, $username)
+ }
+ }
+ else {
+        # when in a domain, NTAccount(String) will favour domain lookups, so
+        # check if the username is a local user and explicitly search on the
+        # localhost for that account
+ $adsi = [ADSI]("WinNT://$env:COMPUTERNAME,computer")
+ $user = $adsi.psbase.children | Where-Object { $_.schemaClassName -eq "user" -and $_.Name -eq $username }
+ if ($user) {
+ $account = New-Object System.Security.Principal.NTAccount($env:COMPUTERNAME, $username)
+ }
+ else {
+ $account = New-Object System.Security.Principal.NTAccount($username)
+ }
+ }
+
+ try {
+ $account_sid = $account.Translate([System.Security.Principal.SecurityIdentifier])
+ }
+ catch {
+ Fail-Json @{} "account_name $account_name is not a valid account, cannot get SID: $($_.Exception.Message)"
+ }
+
+ return $account_sid.Value
+}
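+
+<#
+Illustrative sketch using the well-known SID S-1-5-18:
+
+    Convert-ToSID -account_name 'NT AUTHORITY\SYSTEM'   # -> S-1-5-18
+    Convert-FromSID -sid 'S-1-5-18'                     # -> NT AUTHORITY\SYSTEM
+#>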
+
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Alias * -Function * -Cmdlet *
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1
new file mode 100644
index 0000000..b59ba72
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1
@@ -0,0 +1,530 @@
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+Function Get-AnsibleWebRequest {
+ <#
+ .SYNOPSIS
+ Creates a System.Net.WebRequest object based on common URL module options in Ansible.
+
+ .DESCRIPTION
+ Will create a WebRequest based on common input options within Ansible. This can be used manually or with
+ Invoke-WithWebRequest.
+
+ .PARAMETER Uri
+ The URI to create the web request for.
+
+ .PARAMETER Method
+    The protocol method to use; if omitted, the default value for the specified URI protocol is used.
+
+ .PARAMETER FollowRedirects
+    Whether to follow redirect responses. This is only valid when using an HTTP URI.
+ all - Will follow all redirects
+ none - Will follow no redirects
+ safe - Will only follow redirects when GET or HEAD is used as the Method
+
+ .PARAMETER Headers
+ A hashtable or dictionary of header values to set on the request. This is only valid for a HTTP URI.
+
+ .PARAMETER HttpAgent
+ A string to set for the 'User-Agent' header. This is only valid for a HTTP URI.
+
+ .PARAMETER MaximumRedirection
+ The maximum number of redirections that will be followed. This is only valid for a HTTP URI.
+
+ .PARAMETER Timeout
+ The timeout in seconds that defines how long to wait until the request times out.
+
+ .PARAMETER ValidateCerts
+    Whether to validate SSL certificates; defaults to True.
+
+ .PARAMETER ClientCert
+    The path to the PFX file to use for X509 authentication. This is only valid for a HTTP URI. This path can either
+    be a filesystem path (C:\folder\cert.pfx) or a PSPath to a certificate (Cert:\CurrentUser\My\<thumbprint>).
+
+ .PARAMETER ClientCertPassword
+ The password for the PFX certificate if required. This is only valid for a HTTP URI.
+
+ .PARAMETER ForceBasicAuth
+ Whether to set the Basic auth header on the first request instead of when required. This is only valid for a
+ HTTP URI.
+
+ .PARAMETER UrlUsername
+ The username to use for authenticating with the target.
+
+ .PARAMETER UrlPassword
+ The password to use for authenticating with the target.
+
+ .PARAMETER UseDefaultCredential
+ Whether to use the current user's credentials if available. This will only work when using Become, using SSH with
+ password auth, or WinRM with CredSSP or Kerberos with credential delegation.
+
+ .PARAMETER UseProxy
+ Whether to use the default proxy defined in IE (WinINet) for the user or set no proxy at all. This should not
+ be set to True when ProxyUrl is also defined.
+
+ .PARAMETER ProxyUrl
+ An explicit proxy server to use for the request instead of relying on the default proxy in IE. This is only
+ valid for a HTTP URI.
+
+ .PARAMETER ProxyUsername
+ An optional username to use for proxy authentication.
+
+ .PARAMETER ProxyPassword
+ The password for ProxyUsername.
+
+ .PARAMETER ProxyUseDefaultCredential
+ Whether to use the current user's credentials for proxy authentication if available. This will only work when
+ using Become, using SSH with password auth, or WinRM with CredSSP or Kerberos with credential delegation.
+
+ .PARAMETER Module
+ The AnsibleBasic module that can be used as a backup parameter source or a way to return warnings back to the
+ Ansible controller.
+
+ .EXAMPLE
+ $spec = @{
+ options = @{}
+ }
+    $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-AnsibleWebRequestSpec))
+
+ $web_request = Get-AnsibleWebRequest -Module $module
+ #>
+ [CmdletBinding()]
+ [OutputType([System.Net.WebRequest])]
+ Param (
+ [Alias("url")]
+ [System.Uri]
+ $Uri,
+
+ [System.String]
+ $Method,
+
+ [Alias("follow_redirects")]
+ [ValidateSet("all", "none", "safe")]
+ [System.String]
+ $FollowRedirects = "safe",
+
+ [System.Collections.IDictionary]
+ $Headers,
+
+ [Alias("http_agent")]
+ [System.String]
+ $HttpAgent = "ansible-httpget",
+
+ [Alias("maximum_redirection")]
+ [System.Int32]
+ $MaximumRedirection = 50,
+
+ [System.Int32]
+ $Timeout = 30,
+
+ [Alias("validate_certs")]
+ [System.Boolean]
+ $ValidateCerts = $true,
+
+ # Credential params
+ [Alias("client_cert")]
+ [System.String]
+ $ClientCert,
+
+ [Alias("client_cert_password")]
+ [System.String]
+ $ClientCertPassword,
+
+ [Alias("force_basic_auth")]
+ [Switch]
+ $ForceBasicAuth,
+
+ [Alias("url_username")]
+ [System.String]
+ $UrlUsername,
+
+ [Alias("url_password")]
+ [System.String]
+ $UrlPassword,
+
+ [Alias("use_default_credential")]
+ [Switch]
+ $UseDefaultCredential,
+
+ # Proxy params
+ [Alias("use_proxy")]
+ [System.Boolean]
+ $UseProxy = $true,
+
+ [Alias("proxy_url")]
+ [System.String]
+ $ProxyUrl,
+
+ [Alias("proxy_username")]
+ [System.String]
+ $ProxyUsername,
+
+ [Alias("proxy_password")]
+ [System.String]
+ $ProxyPassword,
+
+ [Alias("proxy_use_default_credential")]
+ [Switch]
+ $ProxyUseDefaultCredential,
+
+ [ValidateScript({ $_.GetType().FullName -eq 'Ansible.Basic.AnsibleModule' })]
+ [System.Object]
+ $Module
+ )
+
+ # Set module options for parameters unless they were explicitly passed in.
+ if ($Module) {
+ foreach ($param in $PSCmdlet.MyInvocation.MyCommand.Parameters.GetEnumerator()) {
+ if ($PSBoundParameters.ContainsKey($param.Key)) {
+ # Was set explicitly we want to use that value
+ continue
+ }
+
+ foreach ($alias in @($Param.Key) + $param.Value.Aliases) {
+ if ($Module.Params.ContainsKey($alias)) {
+ $var_value = $Module.Params.$alias -as $param.Value.ParameterType
+ Set-Variable -Name $param.Key -Value $var_value
+ break
+ }
+ }
+ }
+ }
+
+ # Disable certificate validation if requested
+ # FUTURE: set this on ServerCertificateValidationCallback of the HttpWebRequest once .NET 4.5 is the minimum
+ if (-not $ValidateCerts) {
+ [System.Net.ServicePointManager]::ServerCertificateValidationCallback = { $true }
+ }
+
+ # Enable TLS1.1/TLS1.2 if they're available but disabled (e.g. .NET 4.5)
+ $security_protocols = [System.Net.ServicePointManager]::SecurityProtocol -bor [System.Net.SecurityProtocolType]::SystemDefault
+ if ([System.Net.SecurityProtocolType].GetMember("Tls11").Count -gt 0) {
+ $security_protocols = $security_protocols -bor [System.Net.SecurityProtocolType]::Tls11
+ }
+ if ([System.Net.SecurityProtocolType].GetMember("Tls12").Count -gt 0) {
+ $security_protocols = $security_protocols -bor [System.Net.SecurityProtocolType]::Tls12
+ }
+ [System.Net.ServicePointManager]::SecurityProtocol = $security_protocols
+
+ $web_request = [System.Net.WebRequest]::Create($Uri)
+ if ($Method) {
+ $web_request.Method = $Method
+ }
+ $web_request.Timeout = $Timeout * 1000
+
+ if ($UseDefaultCredential -and $web_request -is [System.Net.HttpWebRequest]) {
+ $web_request.UseDefaultCredentials = $true
+ }
+ elseif ($UrlUsername) {
+ if ($ForceBasicAuth) {
+ $auth_value = [System.Convert]::ToBase64String([System.Text.Encoding]::ASCII.GetBytes(("{0}:{1}" -f $UrlUsername, $UrlPassword)))
+ $web_request.Headers.Add("Authorization", "Basic $auth_value")
+ }
+ else {
+ $credential = New-Object -TypeName System.Net.NetworkCredential -ArgumentList $UrlUsername, $UrlPassword
+ $web_request.Credentials = $credential
+ }
+ }
+
+ if ($ClientCert) {
+ # Expecting either a filepath or PSPath (Cert:\CurrentUser\My\<thumbprint>)
+ $cert = Get-Item -LiteralPath $ClientCert -ErrorAction SilentlyContinue
+ if ($null -eq $cert) {
+ Write-Error -Message "Client certificate '$ClientCert' does not exist" -Category ObjectNotFound
+ return
+ }
+
+ $crypto_ns = 'System.Security.Cryptography.X509Certificates'
+ if ($cert.PSProvider.Name -ne 'Certificate') {
+ try {
+ $cert = New-Object -TypeName "$crypto_ns.X509Certificate2" -ArgumentList @(
+ $ClientCert, $ClientCertPassword
+ )
+ }
+ catch [System.Security.Cryptography.CryptographicException] {
+ Write-Error -Message "Failed to read client certificate at '$ClientCert'" -Exception $_.Exception -Category SecurityError
+ return
+ }
+ }
+ $web_request.ClientCertificates = New-Object -TypeName "$crypto_ns.X509Certificate2Collection" -ArgumentList @(
+ $cert
+ )
+ }
+
+ if (-not $UseProxy) {
+ $proxy = $null
+ }
+ elseif ($ProxyUrl) {
+ $proxy = New-Object -TypeName System.Net.WebProxy -ArgumentList $ProxyUrl, $true
+ }
+ else {
+ $proxy = $web_request.Proxy
+ }
+
+ # $web_request.Proxy may return $null for an FTP web request. We only set the credentials if we have an actual
+ # proxy to work with, otherwise just ignore the credentials property.
+ if ($null -ne $proxy) {
+ if ($ProxyUseDefaultCredential) {
+ # Weird hack, $web_request.Proxy returns an IWebProxy object which only guarantees the Credentials
+ # property. We cannot set UseDefaultCredentials so we just set the Credentials to the
+ # DefaultCredentials in the CredentialCache which does the same thing.
+ $proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials
+ }
+ elseif ($ProxyUsername) {
+ $proxy.Credentials = New-Object -TypeName System.Net.NetworkCredential -ArgumentList @(
+ $ProxyUsername, $ProxyPassword
+ )
+ }
+ else {
+ $proxy.Credentials = $null
+ }
+ }
+
+ $web_request.Proxy = $proxy
+
+ # Some parameters only apply when dealing with a HttpWebRequest
+ if ($web_request -is [System.Net.HttpWebRequest]) {
+ if ($Headers) {
+ foreach ($header in $Headers.GetEnumerator()) {
+ switch ($header.Key) {
+ Accept { $web_request.Accept = $header.Value }
+ Connection { $web_request.Connection = $header.Value }
+ Content-Length { $web_request.ContentLength = $header.Value }
+ Content-Type { $web_request.ContentType = $header.Value }
+ Expect { $web_request.Expect = $header.Value }
+ Date { $web_request.Date = $header.Value }
+ Host { $web_request.Host = $header.Value }
+ If-Modified-Since { $web_request.IfModifiedSince = $header.Value }
+ Range { $web_request.AddRange($header.Value) }
+ Referer { $web_request.Referer = $header.Value }
+ Transfer-Encoding {
+ $web_request.SendChunked = $true
+ $web_request.TransferEncoding = $header.Value
+ }
+ User-Agent { continue }
+ default { $web_request.Headers.Add($header.Key, $header.Value) }
+ }
+ }
+ }
+
+ # For backwards compatibility we need to support setting the User-Agent if the header was set in the task.
+ # We just need to make sure that an explicit http_agent option, if set, takes priority.
+ if ($Headers -and $Headers.ContainsKey("User-Agent")) {
+ if ($HttpAgent -eq $ansible_web_request_options.http_agent.default) {
+ $HttpAgent = $Headers['User-Agent']
+ }
+ elseif ($null -ne $Module) {
+ $Module.Warn("The 'User-Agent' header and the 'http_agent' was set, using the 'http_agent' for web request")
+ }
+ }
+ $web_request.UserAgent = $HttpAgent
+
+ switch ($FollowRedirects) {
+ none { $web_request.AllowAutoRedirect = $false }
+ safe {
+ if ($web_request.Method -in @("GET", "HEAD")) {
+ $web_request.AllowAutoRedirect = $true
+ }
+ else {
+ $web_request.AllowAutoRedirect = $false
+ }
+ }
+ all { $web_request.AllowAutoRedirect = $true }
+ }
+
+ if ($MaximumRedirection -eq 0) {
+ $web_request.AllowAutoRedirect = $false
+ }
+ else {
+ $web_request.MaximumAutomaticRedirections = $MaximumRedirection
+ }
+ }
+
+ return $web_request
+}
+
+Function Invoke-WithWebRequest {
+ <#
+ .SYNOPSIS
+ Invokes a ScriptBlock with the WebRequest.
+
+ .DESCRIPTION
+ Invokes the ScriptBlock and handles extra information like accessing the response stream, closing those streams
+ safely, as well as setting common module return values.
+
+ .PARAMETER Module
+ The Ansible.Basic module to set the return values for. This will set the following return values:
+ elapsed - The total time, in seconds, that it took to send the web request and process the response
+ msg - The human readable description of the response status code
+ status_code - An int that is the response status code
+
+ .PARAMETER Request
+ The System.Net.WebRequest to call. This can either be manually crafted or created with Get-AnsibleWebRequest.
+
+ .PARAMETER Script
+ The ScriptBlock to invoke during the web request. This ScriptBlock should take in the params
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ This scriptblock should manage the response based on what it needs to do.
+
+ .PARAMETER Body
+ An optional Stream to send to the target during the request.
+
+ .PARAMETER IgnoreBadResponse
+ By default a WebException will be raised for a non-2xx status code and the Script will not be invoked. This
+ parameter can be set to process all responses regardless of the status code.
+
+ .EXAMPLE Basic module that downloads a file
+ $spec = @{
+ options = @{
+ path = @{ type = "path"; required = $true }
+ }
+ }
+ $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-AnsibleWebRequestSpec))
+
+ $web_request = Get-AnsibleWebRequest -Module $module
+
+ Invoke-WithWebRequest -Module $module -Request $web_request -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $fs = [System.IO.File]::Create($module.Params.path)
+ try {
+ $Stream.CopyTo($fs)
+ $fs.Flush()
+ } finally {
+ $fs.Dispose()
+ }
+ }
+ #>
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory = $true)]
+ [System.Object]
+ [ValidateScript({ $_.GetType().FullName -eq 'Ansible.Basic.AnsibleModule' })]
+ $Module,
+
+ [Parameter(Mandatory = $true)]
+ [System.Net.WebRequest]
+ $Request,
+
+ [Parameter(Mandatory = $true)]
+ [ScriptBlock]
+ $Script,
+
+ [AllowNull()]
+ [System.IO.Stream]
+ $Body,
+
+ [Switch]
+ $IgnoreBadResponse
+ )
+
+ $start = Get-Date
+ if ($null -ne $Body) {
+ $request_st = $Request.GetRequestStream()
+ try {
+ $Body.CopyTo($request_st)
+ $request_st.Flush()
+ }
+ finally {
+ $request_st.Close()
+ }
+ }
+
+ try {
+ try {
+ $web_response = $Request.GetResponse()
+ }
+ catch [System.Net.WebException] {
+ # A WebResponse with a status code not in the 200 range will raise a WebException. We check if the
+ # exception raised contains the actual response and continue on if IgnoreBadResponse is set. We also
+ # make sure we set the status_code return value on the Module object if possible
+
+ if ($_.Exception.PSObject.Properties.Name -match "Response") {
+ $web_response = $_.Exception.Response
+
+ if (-not $IgnoreBadResponse -or $null -eq $web_response) {
+ # WebException itself has no status properties; read them from the attached response
+ $Module.Result.msg = $web_response.StatusDescription
+ $Module.Result.status_code = $web_response.StatusCode
+ throw $_
+ }
+ }
+ else {
+ throw $_
+ }
+ }
+
+ if ($Request.RequestUri.IsFile) {
+ # A FileWebResponse won't have these properties set
+ $Module.Result.msg = "OK"
+ $Module.Result.status_code = 200
+ }
+ else {
+ $Module.Result.msg = $web_response.StatusDescription
+ $Module.Result.status_code = $web_response.StatusCode
+ }
+
+ $response_stream = $web_response.GetResponseStream()
+ try {
+ # Invoke the ScriptBlock and pass in WebResponse and ResponseStream
+ &$Script -Response $web_response -Stream $response_stream
+ }
+ finally {
+ $response_stream.Dispose()
+ }
+ }
+ finally {
+ if ($web_response) {
+ $web_response.Close()
+ }
+ $Module.Result.elapsed = ((Get-Date) - $start).TotalSeconds
+ }
+}
+
+Function Get-AnsibleWebRequestSpec {
+ <#
+ .SYNOPSIS
+ Used by modules to get the argument spec fragment for AnsibleModule.
+
+ .EXAMPLE
+ $spec = @{
+ options = @{}
+ }
+ $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-AnsibleWebRequestSpec))
+ #>
+ @{ options = $ansible_web_request_options }
+}
+
+# See lib/ansible/plugins/doc_fragments/url_windows.py
+# Kept here for backwards compat as this variable was added in Ansible 2.9. Ultimately this util should be removed
+# once the deprecation period has ended.
+$ansible_web_request_options = @{
+ method = @{ type = "str" }
+ follow_redirects = @{ type = "str"; choices = @("all", "none", "safe"); default = "safe" }
+ headers = @{ type = "dict" }
+ http_agent = @{ type = "str"; default = "ansible-httpget" }
+ maximum_redirection = @{ type = "int"; default = 50 }
+ timeout = @{ type = "int"; default = 30 } # Was defaulted to 10 in win_get_url but 30 in win_uri so we use 30
+ validate_certs = @{ type = "bool"; default = $true }
+
+ # Credential options
+ client_cert = @{ type = "str" }
+ client_cert_password = @{ type = "str"; no_log = $true }
+ force_basic_auth = @{ type = "bool"; default = $false }
+ url_username = @{ type = "str" }
+ url_password = @{ type = "str"; no_log = $true }
+ use_default_credential = @{ type = "bool"; default = $false }
+
+ # Proxy options
+ use_proxy = @{ type = "bool"; default = $true }
+ proxy_url = @{ type = "str" }
+ proxy_username = @{ type = "str" }
+ proxy_password = @{ type = "str"; no_log = $true }
+ proxy_use_default_credential = @{ type = "bool"; default = $false }
+}
+
+$export_members = @{
+ Function = "Get-AnsibleWebRequest", "Get-AnsibleWebRequestSpec", "Invoke-WithWebRequest"
+ Variable = "ansible_web_request_options"
+}
+Export-ModuleMember @export_members
diff --git a/lib/ansible/module_utils/powershell/__init__.py b/lib/ansible/module_utils/powershell/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/__init__.py
diff --git a/lib/ansible/module_utils/pycompat24.py b/lib/ansible/module_utils/pycompat24.py
new file mode 100644
index 0000000..c398427
--- /dev/null
+++ b/lib/ansible/module_utils/pycompat24.py
@@ -0,0 +1,91 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2015, Marius Gedminas
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+
+def get_exception():
+ """Get the current exception.
+
+ This code needs to work on Python 2.4 through 3.x, so we cannot use
+ "except Exception, e:" (SyntaxError on Python 3.x) nor
+ "except Exception as e:" (SyntaxError on Python 2.4-2.5).
+ Instead we must use ::
+
+ except Exception:
+ e = get_exception()
+
+ """
+ return sys.exc_info()[1]
+
+
+try:
+ # Python 2.6+
+ from ast import literal_eval
+except ImportError:
+ # A replacement for literal_eval that works with Python 2.4, from:
+ # https://mail.python.org/pipermail/python-list/2009-September/551880.html
+ # which is essentially a cut/paste from an earlier (2.6) version of Python's
+ # ast.py
+ from compiler import ast, parse
+ from ansible.module_utils.six import binary_type, integer_types, string_types, text_type
+
+ def literal_eval(node_or_string): # type: ignore[misc]
+ """
+ Safely evaluate an expression node or a string containing a Python
+ expression. The string or node provided may only consist of the following
+ Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
+ and None.
+ """
+ _safe_names = {'None': None, 'True': True, 'False': False}
+ if isinstance(node_or_string, string_types):
+ node_or_string = parse(node_or_string, mode='eval')
+ if isinstance(node_or_string, ast.Expression):
+ node_or_string = node_or_string.node
+
+ def _convert(node):
+ if isinstance(node, ast.Const) and isinstance(node.value, (text_type, binary_type, float, complex) + integer_types):
+ return node.value
+ elif isinstance(node, ast.Tuple):
+ return tuple(map(_convert, node.nodes))
+ elif isinstance(node, ast.List):
+ return list(map(_convert, node.nodes))
+ elif isinstance(node, ast.Dict):
+ # compiler.ast.Dict stores its (key, value) node pairs in the 'items'
+ # attribute (a list), so it must not be called
+ return dict((_convert(k), _convert(v)) for k, v in node.items)
+ elif isinstance(node, ast.Name):
+ if node.name in _safe_names:
+ return _safe_names[node.name]
+ elif isinstance(node, ast.UnarySub):
+ return -_convert(node.expr) # pylint: disable=invalid-unary-operand-type
+ raise ValueError('malformed string')
+ return _convert(node_or_string)
+
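+# An illustrative sketch (not part of the original recipe): both the stdlib
+# implementation and the fallback above accept literal syntax only:
+#
+#     literal_eval("{'a': [1, -2], 'ok': True}")  # -> {'a': [1, -2], 'ok': True}
+#     literal_eval("__import__('os')")            # raises ValueError, code is never evaluated
+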
+__all__ = ('get_exception', 'literal_eval')
diff --git a/lib/ansible/module_utils/service.py b/lib/ansible/module_utils/service.py
new file mode 100644
index 0000000..d2cecd4
--- /dev/null
+++ b/lib/ansible/module_utils/service.py
@@ -0,0 +1,274 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) Ansible Inc, 2016
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import glob
+import os
+import pickle
+import platform
+import select
+import shlex
+import subprocess
+import traceback
+
+from ansible.module_utils.six import PY2, b
+from ansible.module_utils._text import to_bytes, to_text
+
+
+def sysv_is_enabled(name, runlevel=None):
+ '''
+ This function will check if the service name supplied
+ is enabled in any of the sysv runlevels
+
+ :arg name: name of the service to test for
+ :kw runlevel: runlevel to check (default: None)
+ '''
+ if runlevel:
+ if not os.path.isdir('/etc/rc0.d/'):
+ return bool(glob.glob('/etc/init.d/rc%s.d/S??%s' % (runlevel, name)))
+ return bool(glob.glob('/etc/rc%s.d/S??%s' % (runlevel, name)))
+ else:
+ if not os.path.isdir('/etc/rc0.d/'):
+ return bool(glob.glob('/etc/init.d/rc?.d/S??%s' % name))
+ return bool(glob.glob('/etc/rc?.d/S??%s' % name))
+
+
+def get_sysv_script(name):
+ '''
+ This function will return the expected path for an init script
+ corresponding to the service name supplied.
+
+ :arg name: name or path of the service to test for
+ '''
+ if name.startswith('/'):
+ result = name
+ else:
+ result = '/etc/init.d/%s' % name
+
+ return result
+
+
+def sysv_exists(name):
+ '''
+ This function will return True or False depending on
+ the existence of an init script corresponding to the service name supplied.
+
+ :arg name: name of the service to test for
+ '''
+ return os.path.exists(get_sysv_script(name))
+
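+# Illustrative examples (assuming the conventional SysV paths used above):
+#
+#     get_sysv_script('sshd')       # -> '/etc/init.d/sshd'
+#     sysv_exists('sshd')           # True if /etc/init.d/sshd exists
+#     sysv_is_enabled('sshd', '3')  # True if an /etc/rc3.d/S??sshd link exists
+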
+
+def get_ps(module, pattern):
+ '''
+ Last resort to find a service by trying to match pattern to programs in memory
+ '''
+ found = False
+ if platform.system() == 'SunOS':
+ flags = '-ef'
+ else:
+ flags = 'auxww'
+ psbin = module.get_bin_path('ps', True)
+
+ (rc, psout, pserr) = module.run_command([psbin, flags])
+ if rc == 0:
+ for line in psout.splitlines():
+ if pattern in line:
+ # FIXME: should add logic to prevent matching 'self', though that should be extremely rare
+ found = True
+ break
+ return found
+
+
+def fail_if_missing(module, found, service, msg=''):
+ '''
+ This function will return an error or exit gracefully depending on check mode status
+ and if the service is missing or not.
+
+ :arg module: is an AnsibleModule object, used for its utility methods
+ :arg found: boolean indicating if the service was found or not
+ :arg service: name of service
+ :kw msg: extra info to append to error/success msg when missing
+ '''
+ if not found:
+ module.fail_json(msg='Could not find the requested service %s: %s' % (service, msg))
+
+
+def fork_process():
+ '''
+ This function performs the double fork process to detach from the
+ parent process and execute.
+ '''
+ pid = os.fork()
+
+ if pid == 0:
+ # Set stdin/stdout/stderr to /dev/null
+ fd = os.open(os.devnull, os.O_RDWR)
+
+ # clone stdin/out/err
+ for num in range(3):
+ if fd != num:
+ os.dup2(fd, num)
+
+ # close otherwise
+ if fd not in range(3):
+ os.close(fd)
+
+ # Make us a daemon
+ pid = os.fork()
+
+ # end if not in child
+ if pid > 0:
+ os._exit(0)
+
+ # get new process session and detach
+ sid = os.setsid()
+ if sid == -1:
+ raise Exception("Unable to detach session while daemonizing")
+
+ # avoid possible problems with cwd being removed
+ os.chdir("/")
+
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+
+ return pid
+
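+# Note: like os.fork() itself, fork_process() returns in two processes. The
+# original caller gets the first child's pid (> 0) and can wait on it, while
+# the fully detached descendant continues with a return value of 0.
+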
+
+def daemonize(module, cmd):
+ '''
+ Execute a command while detaching as a daemon, returns rc, stdout, and stderr.
+
+ :arg module: is an AnsibleModule object, used for its utility methods
+ :arg cmd: is a list or string representing the command and options to run
+
+ This is complex because daemonization is hard for people.
+ What we do is daemonize a part of this module, the daemon runs the command,
+ picks up the return code and output, and returns it to the main process.
+ '''
+
+ # init some vars
+ chunk = 4096 # FIXME: pass in as arg?
+ errors = 'surrogate_or_strict'
+
+ # start it!
+ try:
+ pipe = os.pipe()
+ pid = fork_process()
+ except OSError as exc:
+ module.fail_json(msg="Error while attempting to fork: %s" % to_text(exc), exception=traceback.format_exc())
+ except Exception as exc:
+ module.fail_json(msg=to_text(exc), exception=traceback.format_exc())
+
+ # we don't do any locking as this should be a unique module/process
+ if pid == 0:
+ os.close(pipe[0])
+
+ # if command is a string, deal with py2 vs py3 conversions for shlex
+ if not isinstance(cmd, list):
+ if PY2:
+ cmd = shlex.split(to_bytes(cmd, errors=errors))
+ else:
+ cmd = shlex.split(to_text(cmd, errors=errors))
+
+ # make sure we always use byte strings
+ run_cmd = []
+ for c in cmd:
+ run_cmd.append(to_bytes(c, errors=errors))
+
+ # execute the command in forked process
+ p = subprocess.Popen(run_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
+ fds = [p.stdout, p.stderr]
+
+ # loop reading output till it's done
+ output = {p.stdout: b(""), p.stderr: b("")}
+ while fds:
+ rfd, wfd, efd = select.select(fds, [], fds, 1)
+ if (rfd + wfd + efd) or p.poll():
+ for out in list(fds):
+ if out in rfd:
+ data = os.read(out.fileno(), chunk)
+ if not data:
+ fds.remove(out)
+ output[out] += data # os.read() already returns bytes, no conversion needed
+
+ # even after fds close, we might want to wait for pid to die
+ p.wait()
+
+ # Return a pickled data of parent
+ return_data = pickle.dumps([p.returncode, to_text(output[p.stdout]), to_text(output[p.stderr])], protocol=pickle.HIGHEST_PROTOCOL)
+ os.write(pipe[1], to_bytes(return_data, errors=errors))
+
+ # clean up
+ os.close(pipe[1])
+ os._exit(0)
+
+ elif pid == -1:
+ module.fail_json(msg="Unable to fork, no exception thrown, probably due to lack of resources, check logs.")
+
+ else:
+ # in parent
+ os.close(pipe[1])
+ os.waitpid(pid, 0)
+
+ # Grab response data after child finishes
+ return_data = b("")
+ while True:
+ rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
+ if pipe[0] in rfd:
+ data = os.read(pipe[0], chunk)
+ if not data:
+ break
+ return_data += data # os.read() already returns bytes
+
+ # Note: no need to specify encoding on py3 as this module sends the
+ # pickle to itself (thus same python interpreter so we aren't mixing
+ # py2 and py3)
+ return pickle.loads(to_bytes(return_data, errors=errors))
+
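+# A minimal usage sketch, not taken from the original source (the command and
+# its flags are purely illustrative):
+#
+#     rc, out, err = daemonize(module, "myservice --no-detach")
+#     if rc != 0:
+#         module.fail_json(msg="failed to daemonize the command: %s" % err)
+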
+
+def check_ps(module, pattern):
+ '''
+ Check if a pattern matches any line of the `ps` output on the target.
+
+ :arg module: AnsibleModule object, used to find and run the ps binary
+ :arg pattern: substring to search for in the ps output
+ '''
+
+ # Set ps flags
+ if platform.system() == 'SunOS':
+ psflags = '-ef'
+ else:
+ psflags = 'auxww'
+
+ # Find ps binary
+ psbin = module.get_bin_path('ps', True)
+
+ (rc, out, err) = module.run_command('%s %s' % (psbin, psflags))
+ # If ps ran cleanly, scan its output for the pattern
+ if rc == 0:
+ for line in out.split('\n'):
+ if pattern in line:
+ return True
+ return False
diff --git a/lib/ansible/module_utils/six/__init__.py b/lib/ansible/module_utils/six/__init__.py
new file mode 100644
index 0000000..f2d41c8
--- /dev/null
+++ b/lib/ansible/module_utils/six/__init__.py
@@ -0,0 +1,1009 @@
+# This code is strewn with things that are not defined on Python3 (unicode,
+# long, etc) but they are all shielded by version checks. This is also an
+# upstream vendored file that we're not going to modify on our own
+# pylint: disable=undefined-variable
+#
+# Copyright (c) 2010-2020 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+# The following makes it easier for us to script updates of the bundled code. It is not part of
+# upstream six
+_BUNDLED_METADATA = {"pypi_name": "six", "version": "1.16.0"}
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.16.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+if PY34:
+ from importlib.util import spec_from_loader
+else:
+ spec_from_loader = None
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python3
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.known_modules:
+ return spec_from_loader(fullname, self)
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true, if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required, if is_package is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+ def create_module(self, spec):
+ return self.load_module(spec.name)
+
+ def exec_module(self, module):
+ pass
+
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ del io
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+ _assertNotRegex = "assertNotRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+def assertNotRegex(self, *args, **kwargs):
+ return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
+""")
+
+
+if sys.version_info[:2] > (3,):
+ exec_("""def raise_from(value, from_value):
+ try:
+ raise value from from_value
+ finally:
+ value = None
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ # This does exactly the same what the :func:`py3:functools.update_wrapper`
+ # function does on Python versions after 3.2. It sets the ``__wrapped__``
+ # attribute on ``wrapper`` object and it doesn't raise an error if any of
+ # the attributes mentioned in ``assigned`` and ``updated`` are missing on
+ # ``wrapped`` object.
+ def _update_wrapper(wrapper, wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ continue
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+ _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+ wraps.__doc__ = functools.wraps.__doc__
+
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(type):
+
+ def __new__(cls, name, this_bases, d):
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
+
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, binary_type):
+ return s
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ # Optimization: Fast return for the common case.
+ if type(s) is str:
+ return s
+ if PY2 and isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A class decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/lib/ansible/module_utils/splitter.py b/lib/ansible/module_utils/splitter.py
new file mode 100644
index 0000000..c170b1c
--- /dev/null
+++ b/lib/ansible/module_utils/splitter.py
@@ -0,0 +1,219 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def _get_quote_state(token, quote_char):
+ '''
+    the goal of this function is to determine if the quoted string
+    is unterminated, in which case it needs to be put back together
+ '''
+ # the char before the current one, used to see if
+ # the current character is escaped
+ prev_char = None
+ for idx, cur_char in enumerate(token):
+ if idx > 0:
+ prev_char = token[idx - 1]
+ if cur_char in '"\'' and prev_char != '\\':
+ if quote_char:
+ if cur_char == quote_char:
+ quote_char = None
+ else:
+ quote_char = cur_char
+ return quote_char
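+
+# Comment-only sketch of the state tracking above:
+#
+#   _get_quote_state('c="foo', None)  # -> '"' (an unterminated double quote)
+#   _get_quote_state('bar"', '"')     # -> None (the quote is now closed)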
+
+
+def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
+ '''
+ this function counts the number of opening/closing blocks for a
+ given opening/closing type and adjusts the current depth for that
+ block based on the difference
+ '''
+ num_open = token.count(open_token)
+ num_close = token.count(close_token)
+ if num_open != num_close:
+ cur_depth += (num_open - num_close)
+ if cur_depth < 0:
+ cur_depth = 0
+ return cur_depth
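+
+# For example, _count_jinja2_blocks('{{ foo', 0, '{{', '}}') returns 1 (one
+# unclosed print block), and a later '}}' token brings the depth back to 0.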
+
+
+def split_args(args):
+ '''
+ Splits args on whitespace, but intelligently reassembles
+ those that may have been split over a jinja2 block or quotes.
+
+    When used in a remote module, we won't ever have to be concerned about
+    jinja2 blocks; however, this function is also used in the core portions
+    before the args are templated.
+
+ example input: a=b c="foo bar"
+ example output: ['a=b', 'c="foo bar"']
+
+    Basically this is a variation of shlex that has some more intelligence for
+    how Ansible needs to use it.
+ '''
+
+ # the list of params parsed out of the arg string
+    # this is going to be the result value when we are done
+ params = []
+
+ # here we encode the args, so we have a uniform charset to
+ # work with, and split on white space
+ args = args.strip()
+ try:
+ args = args.encode('utf-8')
+ do_decode = True
+ except UnicodeDecodeError:
+ do_decode = False
+ items = args.split('\n')
+
+ # iterate over the tokens, and reassemble any that may have been
+ # split on a space inside a jinja2 block.
+ # ex if tokens are "{{", "foo", "}}" these go together
+
+ # These variables are used
+ # to keep track of the state of the parsing, since blocks and quotes
+ # may be nested within each other.
+
+ quote_char = None
+ inside_quotes = False
+ print_depth = 0 # used to count nested jinja2 {{ }} blocks
+ block_depth = 0 # used to count nested jinja2 {% %} blocks
+ comment_depth = 0 # used to count nested jinja2 {# #} blocks
+
+ # now we loop over each split chunk, coalescing tokens if the white space
+ # split occurred within quotes or a jinja2 block of some kind
+ for itemidx, item in enumerate(items):
+
+ # we split on spaces and newlines separately, so that we
+ # can tell which character we split on for reassembly
+ # inside quotation characters
+ tokens = item.strip().split(' ')
+
+ line_continuation = False
+ for idx, token in enumerate(tokens):
+
+ # if we hit a line continuation character, but
+ # we're not inside quotes, ignore it and continue
+ # on to the next token while setting a flag
+ if token == '\\' and not inside_quotes:
+ line_continuation = True
+ continue
+
+ # store the previous quoting state for checking later
+ was_inside_quotes = inside_quotes
+ quote_char = _get_quote_state(token, quote_char)
+ inside_quotes = quote_char is not None
+
+ # multiple conditions may append a token to the list of params,
+ # so we keep track with this flag to make sure it only happens once
+ # append means add to the end of the list, don't append means concatenate
+ # it to the end of the last token
+ appended = False
+
+ # if we're inside quotes now, but weren't before, append the token
+ # to the end of the list, since we'll tack on more to it later
+ # otherwise, if we're inside any jinja2 block, inside quotes, or we were
+ # inside quotes (but aren't now) concat this token to the last param
+ if inside_quotes and not was_inside_quotes:
+ params.append(token)
+ appended = True
+ elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
+ if idx == 0 and not inside_quotes and was_inside_quotes:
+ params[-1] = "%s%s" % (params[-1], token)
+ elif len(tokens) > 1:
+ spacer = ''
+ if idx > 0:
+ spacer = ' '
+ params[-1] = "%s%s%s" % (params[-1], spacer, token)
+ else:
+ spacer = ''
+ if not params[-1].endswith('\n') and idx == 0:
+ spacer = '\n'
+ params[-1] = "%s%s%s" % (params[-1], spacer, token)
+ appended = True
+
+ # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
+ # and may append the current token to the params (if we haven't previously done so)
+ prev_print_depth = print_depth
+ print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
+ if print_depth != prev_print_depth and not appended:
+ params.append(token)
+ appended = True
+
+ prev_block_depth = block_depth
+ block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
+ if block_depth != prev_block_depth and not appended:
+ params.append(token)
+ appended = True
+
+ prev_comment_depth = comment_depth
+ comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
+ if comment_depth != prev_comment_depth and not appended:
+ params.append(token)
+ appended = True
+
+ # finally, if we're at zero depth for all blocks and not inside quotes, and have not
+ # yet appended anything to the list of params, we do so now
+ if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
+ params.append(token)
+
+ # if this was the last token in the list, and we have more than
+ # one item (meaning we split on newlines), add a newline back here
+ # to preserve the original structure
+ if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
+ if not params[-1].endswith('\n') or item == '':
+ params[-1] += '\n'
+
+ # always clear the line continuation flag
+ line_continuation = False
+
+ # If we're done and things are not at zero depth or we're still inside quotes,
+ # raise an error to indicate that the args were unbalanced
+ if print_depth or block_depth or comment_depth or inside_quotes:
+ raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
+
+ # finally, we decode each param back to the unicode it was in the arg string
+ if do_decode:
+ params = [x.decode('utf-8') for x in params]
+
+ return params
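+
+# Comment-only sketch (under the Python 2 string semantics this file targets):
+#
+#   split_args('key={{ a b }} c="x y"')
+#   # -> ['key={{ a b }}', 'c="x y"']
+#
+# The jinja2 block and the quoted value survive intact despite the embedded
+# spaces.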
+
+
+def is_quoted(data):
+ return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'")
+
+
+def unquote(data):
+ ''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
+ if is_quoted(data):
+ return data[1:-1]
+ return data
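+
+# e.g. unquote('"foo bar"') returns 'foo bar'; strings whose first and last
+# quote characters do not match are returned unchanged.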
diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py
new file mode 100644
index 0000000..542f89b
--- /dev/null
+++ b/lib/ansible/module_utils/urls.py
@@ -0,0 +1,2070 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com>, 2015
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+#
+# The match_hostname function and supporting code is under the terms and
+# conditions of the Python Software Foundation License. They were taken from
+# the Python3 standard library and adapted for use in Python2. See comments in the
+# source for which code precisely is under this License.
+#
+# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
+
+
+'''
+The **urls** utils module offers a replacement for the urllib2 python library.
+
+urllib2 is the python stdlib way to retrieve files from the Internet but it
+lacks some security features (around verifying SSL certificates) that users
+should care about in most situations. Using the functions in this module corrects
+deficiencies in the urllib2 module wherever possible.
+
+There are also third-party libraries (for instance, requests) which can be used
+to replace urllib2 with a more secure library. However, any third-party library
+must first be installed on the managed machine, which is an extra step for
+users making use of a module. If possible, avoid third-party libraries by using
+this code instead.
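+
+A hedged usage sketch (``open_url`` is defined later in this module; the URL
+is illustrative):
+
+    from ansible.module_utils.urls import open_url
+    resp = open_url('https://example.com', validate_certs=True)
+    body = resp.read()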
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import atexit
+import base64
+import email.mime.multipart
+import email.mime.nonmultipart
+import email.mime.application
+import email.parser
+import email.utils
+import functools
+import io
+import mimetypes
+import netrc
+import os
+import platform
+import re
+import socket
+import sys
+import tempfile
+import traceback
+import types
+
+from contextlib import contextmanager
+
+try:
+ import gzip
+ HAS_GZIP = True
+ GZIP_IMP_ERR = None
+except ImportError:
+ HAS_GZIP = False
+ GZIP_IMP_ERR = traceback.format_exc()
+ GzipFile = object
+else:
+ GzipFile = gzip.GzipFile # type: ignore[assignment,misc]
+
+try:
+ import email.policy
+except ImportError:
+ # Py2
+ import email.generator
+
+try:
+ import httplib
+except ImportError:
+ # Python 3
+ import http.client as httplib # type: ignore[no-redef]
+
+import ansible.module_utils.compat.typing as t
+import ansible.module_utils.six.moves.http_cookiejar as cookiejar
+import ansible.module_utils.six.moves.urllib.error as urllib_error
+
+from ansible.module_utils.common.collections import Mapping, is_sequence
+from ansible.module_utils.six import PY2, PY3, string_types
+from ansible.module_utils.six.moves import cStringIO
+from ansible.module_utils.basic import get_distribution, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+try:
+ # python3
+ import urllib.request as urllib_request
+ from urllib.request import AbstractHTTPHandler, BaseHandler
+except ImportError:
+ # python2
+ import urllib2 as urllib_request # type: ignore[no-redef]
+ from urllib2 import AbstractHTTPHandler, BaseHandler # type: ignore[no-redef]
+
+urllib_request.HTTPRedirectHandler.http_error_308 = urllib_request.HTTPRedirectHandler.http_error_307 # type: ignore[attr-defined]
+
+try:
+ from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse, unquote
+ HAS_URLPARSE = True
+except Exception:
+ HAS_URLPARSE = False
+
+try:
+ import ssl
+ HAS_SSL = True
+except Exception:
+ HAS_SSL = False
+
+try:
+ # SNI Handling needs python2.7.9's SSLContext
+ from ssl import create_default_context, SSLContext
+ HAS_SSLCONTEXT = True
+except ImportError:
+ HAS_SSLCONTEXT = False
+
+# SNI Handling for python < 2.7.9 with urllib3 support
+HAS_URLLIB3_PYOPENSSLCONTEXT = False
+HAS_URLLIB3_SSL_WRAP_SOCKET = False
+if not HAS_SSLCONTEXT:
+ try:
+ # urllib3>=1.15
+ try:
+ from urllib3.contrib.pyopenssl import PyOpenSSLContext
+ except Exception:
+ from requests.packages.urllib3.contrib.pyopenssl import PyOpenSSLContext
+ HAS_URLLIB3_PYOPENSSLCONTEXT = True
+ except Exception:
+ # urllib3<1.15,>=1.6
+ try:
+ try:
+ from urllib3.contrib.pyopenssl import ssl_wrap_socket
+ except Exception:
+ from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket
+ HAS_URLLIB3_SSL_WRAP_SOCKET = True
+ except Exception:
+ pass
+
+# Select a protocol that includes all secure tls protocols
+# Exclude insecure ssl protocols if possible
+
+if HAS_SSL:
+ # If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient
+ PROTOCOL = ssl.PROTOCOL_TLSv1
+if not HAS_SSLCONTEXT and HAS_SSL:
+ try:
+ import ctypes
+ import ctypes.util
+ except ImportError:
+ # python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl)
+ pass
+ else:
+ libssl_name = ctypes.util.find_library('ssl')
+ libssl = ctypes.CDLL(libssl_name)
+ for method in ('TLSv1_1_method', 'TLSv1_2_method'):
+ try:
+ libssl[method]
+ # Found something - we'll let openssl autonegotiate and hope
+ # the server has disabled sslv2 and 3. best we can do.
+ PROTOCOL = ssl.PROTOCOL_SSLv23
+ break
+ except AttributeError:
+ pass
+ del libssl
+
+
+# The following makes it easier for us to script updates of the bundled backports.ssl_match_hostname
+# The bundled backports.ssl_match_hostname should really be moved into its own file for processing
+_BUNDLED_METADATA = {"pypi_name": "backports.ssl_match_hostname", "version": "3.7.0.1"}
+
+LOADED_VERIFY_LOCATIONS = set() # type: t.Set[str]
+
+HAS_MATCH_HOSTNAME = True
+try:
+ from ssl import match_hostname, CertificateError
+except ImportError:
+ try:
+ from backports.ssl_match_hostname import match_hostname, CertificateError # type: ignore[misc]
+ except ImportError:
+ HAS_MATCH_HOSTNAME = False
+
+HAS_CRYPTOGRAPHY = True
+try:
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.exceptions import UnsupportedAlgorithm
+except ImportError:
+ HAS_CRYPTOGRAPHY = False
+
+# Old import for GSSAPI authentication, this is not used in urls.py but kept for backwards compatibility.
+try:
+ import urllib_gssapi
+ HAS_GSSAPI = True
+except ImportError:
+ HAS_GSSAPI = False
+
+GSSAPI_IMP_ERR = None
+try:
+ import gssapi
+
+ class HTTPGSSAPIAuthHandler(BaseHandler):
+ """ Handles Negotiate/Kerberos support through the gssapi library. """
+
+ AUTH_HEADER_PATTERN = re.compile(r'(?:.*)\s*(Negotiate|Kerberos)\s*([^,]*),?', re.I)
+ handler_order = 480 # Handle before Digest authentication
+
+ def __init__(self, username=None, password=None):
+ self.username = username
+ self.password = password
+ self._context = None
+
+ def get_auth_value(self, headers):
+ auth_match = self.AUTH_HEADER_PATTERN.search(headers.get('www-authenticate', ''))
+ if auth_match:
+ return auth_match.group(1), base64.b64decode(auth_match.group(2))
+
+ def http_error_401(self, req, fp, code, msg, headers):
+ # If we've already attempted the auth and we've reached this again then there was a failure.
+ if self._context:
+ return
+
+ parsed = generic_urlparse(urlparse(req.get_full_url()))
+
+ auth_header = self.get_auth_value(headers)
+ if not auth_header:
+ return
+ auth_protocol, in_token = auth_header
+
+ username = None
+ if self.username:
+ username = gssapi.Name(self.username, name_type=gssapi.NameType.user)
+
+ if username and self.password:
+ if not hasattr(gssapi.raw, 'acquire_cred_with_password'):
+ raise NotImplementedError("Platform GSSAPI library does not support "
+ "gss_acquire_cred_with_password, cannot acquire GSSAPI credential with "
+ "explicit username and password.")
+
+ b_password = to_bytes(self.password, errors='surrogate_or_strict')
+ cred = gssapi.raw.acquire_cred_with_password(username, b_password, usage='initiate').creds
+
+ else:
+ cred = gssapi.Credentials(name=username, usage='initiate')
+
+ # Get the peer certificate for the channel binding token if possible (HTTPS). A bug on macOS causes the
+ # authentication to fail when the CBT is present. Just skip that platform.
+ cbt = None
+ cert = getpeercert(fp, True)
+ if cert and platform.system() != 'Darwin':
+ cert_hash = get_channel_binding_cert_hash(cert)
+ if cert_hash:
+ cbt = gssapi.raw.ChannelBindings(application_data=b"tls-server-end-point:" + cert_hash)
+
+ # TODO: We could add another option that is set to include the port in the SPN if desired in the future.
+ target = gssapi.Name("HTTP@%s" % parsed['hostname'], gssapi.NameType.hostbased_service)
+ self._context = gssapi.SecurityContext(usage="initiate", name=target, creds=cred, channel_bindings=cbt)
+
+ resp = None
+ while not self._context.complete:
+ out_token = self._context.step(in_token)
+ if not out_token:
+ break
+
+ auth_header = '%s %s' % (auth_protocol, to_native(base64.b64encode(out_token)))
+ req.add_unredirected_header('Authorization', auth_header)
+ resp = self.parent.open(req)
+
+ # The response could contain a token that the client uses to validate the server
+ auth_header = self.get_auth_value(resp.headers)
+ if not auth_header:
+ break
+ in_token = auth_header[1]
+
+ return resp
+
+except ImportError:
+ GSSAPI_IMP_ERR = traceback.format_exc()
+ HTTPGSSAPIAuthHandler = None # type: types.ModuleType | None # type: ignore[no-redef]
+
+if not HAS_MATCH_HOSTNAME:
+ # The following block of code is under the terms and conditions of the
+ # Python Software Foundation License
+
+ """The match_hostname() function from Python 3.4, essential when using SSL."""
+
+ try:
+ # Divergence: Python-3.7+'s _ssl has this exception type but older Pythons do not
+ from _ssl import SSLCertVerificationError
+ CertificateError = SSLCertVerificationError # type: ignore[misc]
+ except ImportError:
+ class CertificateError(ValueError): # type: ignore[no-redef]
+ pass
+
+ def _dnsname_match(dn, hostname):
+ """Matching according to RFC 6125, section 6.4.3
+
+ - Hostnames are compared lower case.
+ - For IDNA, both dn and hostname must be encoded as IDN A-label (ACE).
+ - Partial wildcards like 'www*.example.org', multiple wildcards, sole
+      wildcard or wildcards in labels other than the left-most label are not
+ supported and a CertificateError is raised.
+ - A wildcard must match at least one character.
+ """
+ if not dn:
+ return False
+
+ wildcards = dn.count('*')
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ if wildcards > 1:
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: %s" % repr(dn))
+
+ dn_leftmost, sep, dn_remainder = dn.partition('.')
+
+ if '*' in dn_remainder:
+ # Only match wildcard in leftmost segment.
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise CertificateError(
+ "wildcard can only be present in the leftmost label: "
+ "%s." % repr(dn))
+
+ if not sep:
+ # no right side
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise CertificateError(
+ "sole wildcard without additional labels are not support: "
+ "%s." % repr(dn))
+
+ if dn_leftmost != '*':
+ # no partial wildcard matching
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise CertificateError(
+ "partial wildcards in leftmost label are not supported: "
+ "%s." % repr(dn))
+
+ hostname_leftmost, sep, hostname_remainder = hostname.partition('.')
+ if not hostname_leftmost or not sep:
+ # wildcard must match at least one char
+ return False
+ return dn_remainder.lower() == hostname_remainder.lower()
+
+ def _inet_paton(ipname):
+ """Try to convert an IP address to packed binary form
+
+ Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6
+ support.
+ """
+ # inet_aton() also accepts strings like '1'
+ # Divergence: We make sure we have native string type for all python versions
+ try:
+ b_ipname = to_bytes(ipname, errors='strict')
+ except UnicodeError:
+ raise ValueError("%s must be an all-ascii string." % repr(ipname))
+
+ # Set ipname in native string format
+ if sys.version_info < (3,):
+ n_ipname = b_ipname
+ else:
+ n_ipname = ipname
+
+ if n_ipname.count('.') == 3:
+ try:
+ return socket.inet_aton(n_ipname)
+            # Divergence: OSError on later python3; socket.error earlier.
+            # Null bytes generate ValueError on python3 (we want to raise
+            # ValueError anyway), TypeError earlier
+ except (OSError, socket.error, TypeError):
+ pass
+
+ try:
+ return socket.inet_pton(socket.AF_INET6, n_ipname)
+            # Divergence: OSError on later python3; socket.error earlier.
+            # Null bytes generate ValueError on python3 (we want to raise
+            # ValueError anyway), TypeError earlier
+ except (OSError, socket.error, TypeError):
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise ValueError("%s is neither an IPv4 nor an IP6 "
+ "address." % repr(ipname))
+ except AttributeError:
+ # AF_INET6 not available
+ pass
+
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise ValueError("%s is not an IPv4 address." % repr(ipname))
+
+ def _ipaddress_match(ipname, host_ip):
+ """Exact matching of IP addresses.
+
+ RFC 6125 explicitly doesn't define an algorithm for this
+ (section 1.7.2 - "Out of Scope").
+ """
+ # OpenSSL may add a trailing newline to a subjectAltName's IP address
+ ip = _inet_paton(ipname.rstrip())
+ return ip == host_ip
+
+ def match_hostname(cert, hostname): # type: ignore[misc]
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed.
+
+ The function matches IP addresses rather than dNSNames if hostname is a
+ valid ipaddress string. IPv4 addresses are supported on all platforms.
+ IPv6 addresses are supported on platforms with IPv6 support (AF_INET6
+ and inet_pton).
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate, match_hostname needs a "
+ "SSL socket or SSL context with either "
+ "CERT_OPTIONAL or CERT_REQUIRED")
+ try:
+ # Divergence: Deal with hostname as bytes
+ host_ip = _inet_paton(to_text(hostname, errors='strict'))
+ except UnicodeError:
+ # Divergence: Deal with hostname as byte strings.
+ # IP addresses should be all ascii, so we consider it not
+ # an IP address if this fails
+ host_ip = None
+ except ValueError:
+ # Not an IP address (common case)
+ host_ip = None
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if host_ip is None and _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ elif key == 'IP Address':
+ if host_ip is not None and _ipaddress_match(value, host_ip):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError("hostname %r doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError("no appropriate commonName or subjectAltName fields were found")
+
+ # End of Python Software Foundation Licensed code
+
+ HAS_MATCH_HOSTNAME = True
+
+
+# This is a dummy cacert provided for macOS since you need at least 1
+# ca cert, regardless of validity, for Python on macOS to use the
+# keychain functionality in OpenSSL for validating SSL certificates.
+# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
+b_DUMMY_CA_CERT = b"""-----BEGIN CERTIFICATE-----
+MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
+BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
+MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
+MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
+VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
+gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
+gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
+4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
+gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
+FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
+CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
+aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
+MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
+qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
+zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
+-----END CERTIFICATE-----
+"""
+
+b_PEM_CERT_RE = re.compile(
+ br'^-----BEGIN CERTIFICATE-----\n.+?-----END CERTIFICATE-----$',
+ flags=re.M | re.S
+)
+
+#
+# Exceptions
+#
+
+
+class ConnectionError(Exception):
+ """Failed to connect to the server"""
+ pass
+
+
+class ProxyError(ConnectionError):
+ """Failure to connect because of a proxy"""
+ pass
+
+
+class SSLValidationError(ConnectionError):
+ """Failure to connect due to SSL validation failing"""
+ pass
+
+
+class NoSSLError(SSLValidationError):
+ """Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
+ pass
+
+
+class MissingModuleError(Exception):
+ """Failed to import 3rd party module required by the caller"""
+ def __init__(self, message, import_traceback, module=None):
+ super(MissingModuleError, self).__init__(message)
+ self.import_traceback = import_traceback
+ self.module = module
+
+
+# Some environments (Google Compute Engine's CoreOS deploys) do not compile
+# against openssl and thus do not have any HTTPS support.
+CustomHTTPSConnection = None
+CustomHTTPSHandler = None
+HTTPSClientAuthHandler = None
+UnixHTTPSConnection = None
+if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib_request, 'HTTPSHandler'):
+ class CustomHTTPSConnection(httplib.HTTPSConnection): # type: ignore[no-redef]
+ def __init__(self, *args, **kwargs):
+ httplib.HTTPSConnection.__init__(self, *args, **kwargs)
+ self.context = None
+ if HAS_SSLCONTEXT:
+ self.context = self._context
+ elif HAS_URLLIB3_PYOPENSSLCONTEXT:
+ self.context = self._context = PyOpenSSLContext(PROTOCOL)
+ if self.context and self.cert_file:
+ self.context.load_cert_chain(self.cert_file, self.key_file)
+
+ def connect(self):
+ "Connect to a host on a given (SSL) port."
+
+ if hasattr(self, 'source_address'):
+ sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
+ else:
+ sock = socket.create_connection((self.host, self.port), self.timeout)
+
+ server_hostname = self.host
+ # Note: self._tunnel_host is not available on py < 2.6 but this code
+ # isn't used on py < 2.6 (lack of create_connection)
+ if self._tunnel_host:
+ self.sock = sock
+ self._tunnel()
+ server_hostname = self._tunnel_host
+
+ if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
+ self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname)
+ elif HAS_URLLIB3_SSL_WRAP_SOCKET:
+ self.sock = ssl_wrap_socket(sock, keyfile=self.key_file, cert_reqs=ssl.CERT_NONE, # pylint: disable=used-before-assignment
+ certfile=self.cert_file, ssl_version=PROTOCOL, server_hostname=server_hostname)
+ else:
+ self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
+
+ class CustomHTTPSHandler(urllib_request.HTTPSHandler): # type: ignore[no-redef]
+
+ def https_open(self, req):
+ kwargs = {}
+ if HAS_SSLCONTEXT:
+ kwargs['context'] = self._context
+ return self.do_open(
+ functools.partial(
+ CustomHTTPSConnection,
+ **kwargs
+ ),
+ req
+ )
+
+ https_request = AbstractHTTPHandler.do_request_
+
+ class HTTPSClientAuthHandler(urllib_request.HTTPSHandler): # type: ignore[no-redef]
+ '''Handles client authentication via cert/key
+
+ This is a fairly lightweight extension on HTTPSHandler, and can be used
+ in place of HTTPSHandler
+ '''
+
+ def __init__(self, client_cert=None, client_key=None, unix_socket=None, **kwargs):
+ urllib_request.HTTPSHandler.__init__(self, **kwargs)
+ self.client_cert = client_cert
+ self.client_key = client_key
+ self._unix_socket = unix_socket
+
+ def https_open(self, req):
+ return self.do_open(self._build_https_connection, req)
+
+ def _build_https_connection(self, host, **kwargs):
+ kwargs.update({
+ 'cert_file': self.client_cert,
+ 'key_file': self.client_key,
+ })
+ try:
+ kwargs['context'] = self._context
+ except AttributeError:
+ pass
+ if self._unix_socket:
+ return UnixHTTPSConnection(self._unix_socket)(host, **kwargs)
+ if not HAS_SSLCONTEXT:
+ return CustomHTTPSConnection(host, **kwargs)
+ return httplib.HTTPSConnection(host, **kwargs)
+
+ @contextmanager
+ def unix_socket_patch_httpconnection_connect():
+ '''Monkey patch ``httplib.HTTPConnection.connect`` to be ``UnixHTTPConnection.connect``
+ so that when calling ``super(UnixHTTPSConnection, self).connect()`` we get the
+ correct behavior of creating self.sock for the unix socket
+ '''
+ _connect = httplib.HTTPConnection.connect
+ httplib.HTTPConnection.connect = UnixHTTPConnection.connect
+ yield
+ httplib.HTTPConnection.connect = _connect
+
+ class UnixHTTPSConnection(httplib.HTTPSConnection): # type: ignore[no-redef]
+ def __init__(self, unix_socket):
+ self._unix_socket = unix_socket
+
+ def connect(self):
+ # This method exists simply to ensure we monkeypatch
+ # httplib.HTTPConnection.connect to call UnixHTTPConnection.connect
+ with unix_socket_patch_httpconnection_connect():
+ # Disable pylint check for the super() call. It complains about UnixHTTPSConnection
+ # being a NoneType because of the initial definition above, but it won't actually
+ # be a NoneType when this code runs
+ # pylint: disable=bad-super-call
+ super(UnixHTTPSConnection, self).connect()
+
+ def __call__(self, *args, **kwargs):
+ httplib.HTTPSConnection.__init__(self, *args, **kwargs)
+ return self
+
+
+class UnixHTTPConnection(httplib.HTTPConnection):
+ '''Handles http requests to a unix socket file'''
+
+ def __init__(self, unix_socket):
+ self._unix_socket = unix_socket
+
+ def connect(self):
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ try:
+ self.sock.connect(self._unix_socket)
+ except OSError as e:
+ raise OSError('Invalid Socket File (%s): %s' % (self._unix_socket, e))
+ if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ self.sock.settimeout(self.timeout)
+
+ def __call__(self, *args, **kwargs):
+ httplib.HTTPConnection.__init__(self, *args, **kwargs)
+ return self
+
+
+class UnixHTTPHandler(urllib_request.HTTPHandler):
+ '''Handler for Unix urls'''
+
+ def __init__(self, unix_socket, **kwargs):
+ urllib_request.HTTPHandler.__init__(self, **kwargs)
+ self._unix_socket = unix_socket
+
+ def http_open(self, req):
+ return self.do_open(UnixHTTPConnection(self._unix_socket), req)
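+
+# Comment-only sketch (socket path and URL are illustrative; the URL host is
+# ignored once the handler routes the connection over the unix socket):
+#
+#   opener = urllib_request.build_opener(UnixHTTPHandler('/var/run/app.sock'))
+#   opener.open('http://localhost/ping')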
+
+
+class ParseResultDottedDict(dict):
+ '''
+ A dict that acts similarly to the ParseResult named tuple from urllib
+ '''
+ def __init__(self, *args, **kwargs):
+ super(ParseResultDottedDict, self).__init__(*args, **kwargs)
+ self.__dict__ = self
+
+ def as_list(self):
+ '''
+ Generate a list from this dict, that looks like the ParseResult named tuple
+ '''
+ return [self.get(k, None) for k in ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')]
+
+
+def generic_urlparse(parts):
+ '''
+ Returns a dictionary of url parts as parsed by urlparse,
+ but accounts for the fact that older versions of that
+ library do not support named attributes (ie. .netloc)
+ '''
+ generic_parts = ParseResultDottedDict()
+ if hasattr(parts, 'netloc'):
+ # urlparse is newer, just read the fields straight
+ # from the parts object
+ generic_parts['scheme'] = parts.scheme
+ generic_parts['netloc'] = parts.netloc
+ generic_parts['path'] = parts.path
+ generic_parts['params'] = parts.params
+ generic_parts['query'] = parts.query
+ generic_parts['fragment'] = parts.fragment
+ generic_parts['username'] = parts.username
+ generic_parts['password'] = parts.password
+ hostname = parts.hostname
+ if hostname and hostname[0] == '[' and '[' in parts.netloc and ']' in parts.netloc:
+ # Py2.6 doesn't parse IPv6 addresses correctly
+ hostname = parts.netloc.split(']')[0][1:].lower()
+ generic_parts['hostname'] = hostname
+
+ try:
+ port = parts.port
+ except ValueError:
+ # Py2.6 doesn't parse IPv6 addresses correctly
+ netloc = parts.netloc.split('@')[-1].split(']')[-1]
+ if ':' in netloc:
+ port = netloc.split(':')[1]
+ if port:
+ port = int(port)
+ else:
+ port = None
+ generic_parts['port'] = port
+ else:
+ # we have to use indexes, and then parse out
+ # the other parts not supported by indexing
+ generic_parts['scheme'] = parts[0]
+ generic_parts['netloc'] = parts[1]
+ generic_parts['path'] = parts[2]
+ generic_parts['params'] = parts[3]
+ generic_parts['query'] = parts[4]
+ generic_parts['fragment'] = parts[5]
+ # get the username, password, etc.
+ try:
+ netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
+ match = netloc_re.match(parts[1])
+ auth = match.group(1)
+ hostname = match.group(2)
+ port = match.group(3)
+ if port:
+ # the capture group for the port will include the ':',
+ # so remove it and convert the port to an integer
+ port = int(port[1:])
+ if auth:
+ # the capture group above includes the @, so remove it
+ # and then split it up based on the first ':' found
+ auth = auth[:-1]
+ username, password = auth.split(':', 1)
+ else:
+ username = password = None
+ generic_parts['username'] = username
+ generic_parts['password'] = password
+ generic_parts['hostname'] = hostname
+ generic_parts['port'] = port
+ except Exception:
+ generic_parts['username'] = None
+ generic_parts['password'] = None
+ generic_parts['hostname'] = parts[1]
+ generic_parts['port'] = None
+ return generic_parts
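+
+# Comment-only sketch:
+#
+#   parts = generic_urlparse(urlparse('https://user:pw@host.example:8443/p'))
+#   parts.hostname, parts.port, parts.username  # ('host.example', 8443, 'user')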
+
+
+def extract_pem_certs(b_data):
+ for match in b_PEM_CERT_RE.finditer(b_data):
+ yield match.group(0)
+
+
+def get_response_filename(response):
+ url = response.geturl()
+ path = urlparse(url)[2]
+ filename = os.path.basename(path.rstrip('/')) or None
+ if filename:
+ filename = unquote(filename)
+
+ return response.headers.get_param('filename', header='content-disposition') or filename
+
+
+def parse_content_type(response):
+ if PY2:
+ get_type = response.headers.gettype
+ get_param = response.headers.getparam
+ else:
+ get_type = response.headers.get_content_type
+ get_param = response.headers.get_param
+
+ content_type = (get_type() or 'application/octet-stream').split(',')[0]
+ main_type, sub_type = content_type.split('/')
+ charset = (get_param('charset') or 'utf-8').split(',')[0]
+ return content_type, main_type, sub_type, charset
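+
+# Comment-only sketch: for a response whose Content-Type header is
+# 'text/html; charset=ISO-8859-1', parse_content_type(response) returns
+# ('text/html', 'text', 'html', 'ISO-8859-1').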
+
+
+class GzipDecodedReader(GzipFile):
+ """A file-like object to decode a response encoded with the gzip
+ method, as described in RFC 1952.
+
+ Largely copied from ``xmlrpclib``/``xmlrpc.client``
+ """
+ def __init__(self, fp):
+ if not HAS_GZIP:
+ raise MissingModuleError(self.missing_gzip_error(), import_traceback=GZIP_IMP_ERR)
+
+ if PY3:
+ self._io = fp
+ else:
+ # Py2 ``HTTPResponse``/``addinfourl`` doesn't support all of the file object
+ # functionality GzipFile requires
+ self._io = io.BytesIO()
+ for block in iter(functools.partial(fp.read, 65536), b''):
+ self._io.write(block)
+ self._io.seek(0)
+ fp.close()
+ gzip.GzipFile.__init__(self, mode='rb', fileobj=self._io) # pylint: disable=non-parent-init-called
+
+ def close(self):
+ try:
+ gzip.GzipFile.close(self)
+ finally:
+ self._io.close()
+
+ @staticmethod
+ def missing_gzip_error():
+ return missing_required_lib(
+ 'gzip',
+ reason='to decompress gzip encoded responses. '
+ 'Set "decompress" to False, to prevent attempting auto decompression'
+ )
+
+
+class RequestWithMethod(urllib_request.Request):
+ '''
+ Workaround for using DELETE/PUT/etc with urllib2
+ Originally contained in library/net_infrastructure/dnsmadeeasy
+ '''
+
+ def __init__(self, url, method, data=None, headers=None, origin_req_host=None, unverifiable=True):
+ if headers is None:
+ headers = {}
+ self._method = method.upper()
+ urllib_request.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
+
+ def get_method(self):
+ if self._method:
+ return self._method
+ else:
+ return urllib_request.Request.get_method(self)
+
+
+def RedirectHandlerFactory(follow_redirects=None, validate_certs=True, ca_path=None, ciphers=None):
+ """This is a class factory that closes over the value of
+ ``follow_redirects`` so that the RedirectHandler class has access to
+    that value without having to use globals, which could otherwise cause
+    problems where ``open_url`` or ``fetch_url`` are used multiple times in a module.
+ """
+
+ class RedirectHandler(urllib_request.HTTPRedirectHandler):
+ """This is an implementation of a RedirectHandler to match the
+ functionality provided by httplib2. It will utilize the value of
+ ``follow_redirects`` that is passed into ``RedirectHandlerFactory``
+ to determine how redirects should be handled in urllib2.
+ """
+
+ def redirect_request(self, req, fp, code, msg, hdrs, newurl):
+ if not any((HAS_SSLCONTEXT, HAS_URLLIB3_PYOPENSSLCONTEXT)):
+ handler = maybe_add_ssl_handler(newurl, validate_certs, ca_path=ca_path, ciphers=ciphers)
+ if handler:
+ urllib_request._opener.add_handler(handler)
+
+ # Preserve urllib2 compatibility
+ if follow_redirects == 'urllib2':
+ return urllib_request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)
+
+ # Handle disabled redirects
+ elif follow_redirects in ['no', 'none', False]:
+ raise urllib_error.HTTPError(newurl, code, msg, hdrs, fp)
+
+ method = req.get_method()
+
+ # Handle non-redirect HTTP status or invalid follow_redirects
+ if follow_redirects in ['all', 'yes', True]:
+ if code < 300 or code >= 400:
+ raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
+ elif follow_redirects == 'safe':
+ if code < 300 or code >= 400 or method not in ('GET', 'HEAD'):
+ raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
+ else:
+ raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
+
+ try:
+ # Python 2-3.3
+ data = req.get_data()
+ origin_req_host = req.get_origin_req_host()
+ except AttributeError:
+ # Python 3.4+
+ data = req.data
+ origin_req_host = req.origin_req_host
+
+            # Be lenient with URIs containing a space
+ newurl = newurl.replace(' ', '%20')
+
+ # Support redirect with payload and original headers
+ if code in (307, 308):
+ # Preserve payload and headers
+ headers = req.headers
+ else:
+ # Do not preserve payload and filter headers
+ data = None
+ headers = dict((k, v) for k, v in req.headers.items()
+ if k.lower() not in ("content-length", "content-type", "transfer-encoding"))
+
+ # http://tools.ietf.org/html/rfc7231#section-6.4.4
+ if code == 303 and method != 'HEAD':
+ method = 'GET'
+
+ # Do what the browsers do, despite standards...
+ # First, turn 302s into GETs.
+ if code == 302 and method != 'HEAD':
+ method = 'GET'
+
+ # Second, if a POST is responded to with a 301, turn it into a GET.
+ if code == 301 and method == 'POST':
+ method = 'GET'
+
+ return RequestWithMethod(newurl,
+ method=method,
+ headers=headers,
+ data=data,
+ origin_req_host=origin_req_host,
+ unverifiable=True,
+ )
+
+ return RedirectHandler
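+
+# Comment-only sketch: build a RedirectHandler class bound to a policy and
+# register it with an opener (urllib accepts handler classes as well as
+# instances):
+#
+#   handler_cls = RedirectHandlerFactory(follow_redirects='safe', validate_certs=True)
+#   opener = urllib_request.build_opener(handler_cls)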
+
+
+def build_ssl_validation_error(hostname, port, paths, exc=None):
+    '''Intelligently build out the SSLValidationError based on what support
+    is installed
+ '''
+
+ msg = [
+ ('Failed to validate the SSL certificate for %s:%s.'
+ ' Make sure your managed systems have a valid CA'
+ ' certificate installed.')
+ ]
+ if not HAS_SSLCONTEXT:
+ msg.append('If the website serving the url uses SNI you need'
+ ' python >= 2.7.9 on your managed machine')
+ msg.append(' (the python executable used (%s) is version: %s)' %
+ (sys.executable, ''.join(sys.version.splitlines())))
+ if not HAS_URLLIB3_PYOPENSSLCONTEXT and not HAS_URLLIB3_SSL_WRAP_SOCKET:
+ msg.append('or you can install the `urllib3`, `pyOpenSSL`,'
+ ' `ndg-httpsclient`, and `pyasn1` python modules')
+
+ msg.append('to perform SNI verification in python >= 2.6.')
+
+ msg.append('You can use validate_certs=False if you do'
+               ' not need to confirm the server\'s identity but this is'
+ ' unsafe and not recommended.'
+ ' Paths checked for this platform: %s.')
+
+ if exc:
+ msg.append('The exception msg was: %s.' % to_native(exc))
+
+ raise SSLValidationError(' '.join(msg) % (hostname, port, ", ".join(paths)))
+
+
+def atexit_remove_file(filename):
+ if os.path.exists(filename):
+ try:
+ os.unlink(filename)
+ except Exception:
+ # just ignore if we cannot delete, things should be ok
+ pass
+
+
+def make_context(cafile=None, cadata=None, ciphers=None, validate_certs=True):
+ if ciphers is None:
+ ciphers = []
+
+ if not is_sequence(ciphers):
+ raise TypeError('Ciphers must be a list. Got %s.' % ciphers.__class__.__name__)
+
+ if HAS_SSLCONTEXT:
+ context = create_default_context(cafile=cafile)
+ elif HAS_URLLIB3_PYOPENSSLCONTEXT:
+ context = PyOpenSSLContext(PROTOCOL)
+ else:
+ raise NotImplementedError('Host libraries are too old to support creating an sslcontext')
+
+ if not validate_certs:
+ if ssl.OP_NO_SSLv2:
+ context.options |= ssl.OP_NO_SSLv2
+ context.options |= ssl.OP_NO_SSLv3
+ context.check_hostname = False
+ context.verify_mode = ssl.CERT_NONE
+
+ if validate_certs and any((cafile, cadata)):
+ context.load_verify_locations(cafile=cafile, cadata=cadata)
+
+ if ciphers:
+ context.set_ciphers(':'.join(map(to_native, ciphers)))
+
+ return context
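+
+# Comment-only sketch (the cafile path is illustrative; requires SSLContext
+# or the urllib3 PyOpenSSL fallback):
+#
+#   ctx = make_context(cafile='/etc/ssl/certs/ca-bundle.crt')
+#   ssl_sock = ctx.wrap_socket(sock, server_hostname='example.com')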
+
+
+def get_ca_certs(cafile=None):
+ # tries to find a valid CA cert in one of the
+ # standard locations for the current distribution
+
+ cadata = bytearray()
+ paths_checked = []
+
+ if cafile:
+ paths_checked = [cafile]
+ with open(to_bytes(cafile, errors='surrogate_or_strict'), 'rb') as f:
+ if HAS_SSLCONTEXT:
+ for b_pem in extract_pem_certs(f.read()):
+ cadata.extend(
+ ssl.PEM_cert_to_DER_cert(
+ to_native(b_pem, errors='surrogate_or_strict')
+ )
+ )
+ return cafile, cadata, paths_checked
+
+ if not HAS_SSLCONTEXT:
+ paths_checked.append('/etc/ssl/certs')
+
+ system = to_text(platform.system(), errors='surrogate_or_strict')
+ # build a list of paths to check for .crt/.pem files
+ # based on the platform type
+ if system == u'Linux':
+ paths_checked.append('/etc/pki/ca-trust/extracted/pem')
+ paths_checked.append('/etc/pki/tls/certs')
+ paths_checked.append('/usr/share/ca-certificates/cacert.org')
+ elif system == u'FreeBSD':
+ paths_checked.append('/usr/local/share/certs')
+ elif system == u'OpenBSD':
+ paths_checked.append('/etc/ssl')
+ elif system == u'NetBSD':
+ paths_checked.append('/etc/openssl/certs')
+ elif system == u'SunOS':
+ paths_checked.append('/opt/local/etc/openssl/certs')
+ elif system == u'AIX':
+ paths_checked.append('/var/ssl/certs')
+ paths_checked.append('/opt/freeware/etc/ssl/certs')
+
+ # fall back to a user-deployed cert in a standard
+ # location if the OS platform one is not available
+ paths_checked.append('/etc/ansible')
+
+ tmp_path = None
+ if not HAS_SSLCONTEXT:
+ tmp_fd, tmp_path = tempfile.mkstemp()
+ atexit.register(atexit_remove_file, tmp_path)
+
+ # Write the dummy ca cert if we are running on macOS
+ if system == u'Darwin':
+ if HAS_SSLCONTEXT:
+ cadata.extend(
+ ssl.PEM_cert_to_DER_cert(
+ to_native(b_DUMMY_CA_CERT, errors='surrogate_or_strict')
+ )
+ )
+ else:
+ os.write(tmp_fd, b_DUMMY_CA_CERT)
+ # Default Homebrew path for OpenSSL certs
+ paths_checked.append('/usr/local/etc/openssl')
+
+ # for all of the paths, find any .crt or .pem files
+ # and compile them into single temp file for use
+ # in the ssl check to speed up the test
+ for path in paths_checked:
+ if not os.path.isdir(path):
+ continue
+
+ dir_contents = os.listdir(path)
+ for f in dir_contents:
+ full_path = os.path.join(path, f)
+ if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'):
+ try:
+ if full_path not in LOADED_VERIFY_LOCATIONS:
+ with open(full_path, 'rb') as cert_file:
+ b_cert = cert_file.read()
+ if HAS_SSLCONTEXT:
+ try:
+ for b_pem in extract_pem_certs(b_cert):
+ cadata.extend(
+ ssl.PEM_cert_to_DER_cert(
+ to_native(b_pem, errors='surrogate_or_strict')
+ )
+ )
+ except Exception:
+ continue
+ else:
+ os.write(tmp_fd, b_cert)
+ os.write(tmp_fd, b'\n')
+ except (OSError, IOError):
+ pass
+
+ if HAS_SSLCONTEXT:
+ default_verify_paths = ssl.get_default_verify_paths()
+ paths_checked[:0] = [default_verify_paths.capath]
+ else:
+ os.close(tmp_fd)
+
+ return (tmp_path, cadata, paths_checked)
+
+
+class SSLValidationHandler(urllib_request.BaseHandler):
+ '''
+ A custom handler class for SSL validation.
+
+ Based on:
+ http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
+ http://techknack.net/python-urllib2-handlers/
+ '''
+ CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\n"
+
+ def __init__(self, hostname, port, ca_path=None, ciphers=None, validate_certs=True):
+ self.hostname = hostname
+ self.port = port
+ self.ca_path = ca_path
+ self.ciphers = ciphers
+ self.validate_certs = validate_certs
+
+ def get_ca_certs(self):
+ return get_ca_certs(self.ca_path)
+
+ def validate_proxy_response(self, response, valid_codes=None):
+ '''
+ make sure we get back a valid code from the proxy
+ '''
+ valid_codes = [200] if valid_codes is None else valid_codes
+
+ try:
+ (http_version, resp_code, msg) = re.match(br'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
+ if int(resp_code) not in valid_codes:
+ raise Exception
+ except Exception:
+ raise ProxyError('Connection to proxy failed')
+
+ def detect_no_proxy(self, url):
+ '''
+ Detect if the 'no_proxy' environment variable is set and honor those locations.
+ '''
+ env_no_proxy = os.environ.get('no_proxy')
+ if env_no_proxy:
+ env_no_proxy = env_no_proxy.split(',')
+ netloc = urlparse(url).netloc
+
+ for host in env_no_proxy:
+ if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
+ # Our requested URL matches something in no_proxy, so don't
+ # use the proxy for this
+ return False
+ return True
+
+ def make_context(self, cafile, cadata, ciphers=None, validate_certs=True):
+ cafile = self.ca_path or cafile
+ if self.ca_path:
+ cadata = None
+ else:
+ cadata = cadata or None
+
+ return make_context(cafile=cafile, cadata=cadata, ciphers=ciphers, validate_certs=validate_certs)
+
+ def http_request(self, req):
+ tmp_ca_cert_path, cadata, paths_checked = self.get_ca_certs()
+
+ # Detect if 'no_proxy' environment variable is set and if our URL is included
+ use_proxy = self.detect_no_proxy(req.get_full_url())
+ https_proxy = os.environ.get('https_proxy')
+
+ context = None
+ try:
+ context = self.make_context(tmp_ca_cert_path, cadata, ciphers=self.ciphers, validate_certs=self.validate_certs)
+ except NotImplementedError:
+ # We'll make do with no context below
+ pass
+
+ try:
+ if use_proxy and https_proxy:
+ proxy_parts = generic_urlparse(urlparse(https_proxy))
+ port = proxy_parts.get('port') or 443
+ proxy_hostname = proxy_parts.get('hostname', None)
+ if proxy_hostname is None or proxy_parts.get('scheme') == '':
+ raise ProxyError("Failed to parse https_proxy environment variable."
+ " Please make sure you export https proxy as 'https_proxy=<SCHEME>://<IP_ADDRESS>:<PORT>'")
+
+ s = socket.create_connection((proxy_hostname, port))
+ if proxy_parts.get('scheme') == 'http':
+ s.sendall(to_bytes(self.CONNECT_COMMAND % (self.hostname, self.port), errors='surrogate_or_strict'))
+ if proxy_parts.get('username'):
+ credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', ''))
+ s.sendall(b'Proxy-Authorization: Basic %s\r\n' % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip())
+ s.sendall(b'\r\n')
+ connect_result = b""
+ while connect_result.find(b"\r\n\r\n") <= 0:
+ connect_result += s.recv(4096)
+ # 128 kilobytes of headers should be enough for everyone.
+ if len(connect_result) > 131072:
+ raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.')
+ self.validate_proxy_response(connect_result)
+ if context:
+ ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
+ elif HAS_URLLIB3_SSL_WRAP_SOCKET:
+ ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
+ else:
+ ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
+ match_hostname(ssl_s.getpeercert(), self.hostname)
+ else:
+ raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
+ else:
+ s = socket.create_connection((self.hostname, self.port))
+ if context:
+ ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
+ elif HAS_URLLIB3_SSL_WRAP_SOCKET:
+ ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
+ else:
+ ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
+ match_hostname(ssl_s.getpeercert(), self.hostname)
+ # close the ssl connection
+ # ssl_s.unwrap()
+ s.close()
+ except (ssl.SSLError, CertificateError) as e:
+ build_ssl_validation_error(self.hostname, self.port, paths_checked, e)
+ except socket.error as e:
+ raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e)))
+
+ return req
+
+ https_request = http_request
+
+
+def maybe_add_ssl_handler(url, validate_certs, ca_path=None, ciphers=None):
+ parsed = generic_urlparse(urlparse(url))
+ if parsed.scheme == 'https' and validate_certs:
+ if not HAS_SSL:
+ raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,'
+ ' however this is unsafe and not recommended')
+
+ # create the SSL validation handler
+ return SSLValidationHandler(parsed.hostname, parsed.port or 443, ca_path=ca_path, ciphers=ciphers, validate_certs=validate_certs)
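+
+# Comment-only sketch: maybe_add_ssl_handler('https://example.com/x', True)
+# returns an SSLValidationHandler bound to ('example.com', 443), while any
+# plain http URL yields None.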
+
+
+def getpeercert(response, binary_form=False):
+ """ Attempt to get the peer certificate of the response from urlopen. """
+ # The response from urllib2.open() is different across Python 2 and 3
+ if PY3:
+ socket = response.fp.raw._sock
+ else:
+ socket = response.fp._sock.fp._sock
+
+ try:
+ return socket.getpeercert(binary_form)
+ except AttributeError:
+ pass # Not HTTPS
+
+
+def get_channel_binding_cert_hash(certificate_der):
+ """ Gets the channel binding app data for a TLS connection using the peer cert. """
+ if not HAS_CRYPTOGRAPHY:
+ return
+
+ # Logic documented in RFC 5929 section 4 https://tools.ietf.org/html/rfc5929#section-4
+ cert = x509.load_der_x509_certificate(certificate_der, default_backend())
+
+ hash_algorithm = None
+ try:
+ hash_algorithm = cert.signature_hash_algorithm
+ except UnsupportedAlgorithm:
+ pass
+
+ # If the signature hash algorithm is unknown/unsupported or md5/sha1 we must use SHA256.
+ if not hash_algorithm or hash_algorithm.name in ['md5', 'sha1']:
+ hash_algorithm = hashes.SHA256()
+
+ digest = hashes.Hash(hash_algorithm, default_backend())
+ digest.update(certificate_der)
+ return digest.finalize()
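+
+# Comment-only sketch pairing the two helpers above (the hash helper returns
+# None without the cryptography library):
+#
+#   cert_der = getpeercert(response, binary_form=True)
+#   if cert_der:
+#       cert_hash = get_channel_binding_cert_hash(cert_der)
+#       if cert_hash:
+#           cbt_data = b"tls-server-end-point:" + cert_hash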
+
+
+def rfc2822_date_string(timetuple, zone='-0000'):
+ """Accepts a timetuple and optional zone which defaults to ``-0000``
+ and returns a date string as specified by RFC 2822, e.g.:
+
+ Fri, 09 Nov 2001 01:08:47 -0000
+
+ Copied from email.utils.formatdate and modified for separate use
+ """
+ return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
+ ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
+ timetuple[2],
+ ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
+ timetuple[0], timetuple[3], timetuple[4], timetuple[5],
+ zone)
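+
+# Comment-only sketch: for 2001-11-09 01:08:47 UTC (epoch 1005268127),
+#
+#   import time
+#   rfc2822_date_string(time.gmtime(1005268127))
+#
+# returns 'Fri, 09 Nov 2001 01:08:47 -0000', matching the docstring example.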
+
+
+class Request:
+ def __init__(self, headers=None, use_proxy=True, force=False, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=False,
+ follow_redirects='urllib2', client_cert=None, client_key=None, cookies=None, unix_socket=None,
+ ca_path=None, unredirected_headers=None, decompress=True, ciphers=None, use_netrc=True):
+ """This class works somewhat similarly to the ``Session`` class of from requests
+ by defining a cookiejar that an be used across requests as well as cascaded defaults that
+ can apply to repeated requests
+
+ For documentation of params, see ``Request.open``
+
+ >>> from ansible.module_utils.urls import Request
+ >>> r = Request()
+ >>> r.open('GET', 'http://httpbin.org/cookies/set?k1=v1').read()
+ '{\n "cookies": {\n "k1": "v1"\n }\n}\n'
+ >>> r = Request(url_username='user', url_password='passwd')
+ >>> r.open('GET', 'http://httpbin.org/basic-auth/user/passwd').read()
+ '{\n "authenticated": true, \n "user": "user"\n}\n'
+ >>> r = Request(headers=dict(foo='bar'))
+ >>> r.open('GET', 'http://httpbin.org/get', headers=dict(baz='qux')).read()
+
+ """
+
+ self.headers = headers or {}
+ if not isinstance(self.headers, dict):
+ raise ValueError("headers must be a dict: %r" % self.headers)
+ self.use_proxy = use_proxy
+ self.force = force
+ self.timeout = timeout
+ self.validate_certs = validate_certs
+ self.url_username = url_username
+ self.url_password = url_password
+ self.http_agent = http_agent
+ self.force_basic_auth = force_basic_auth
+ self.follow_redirects = follow_redirects
+ self.client_cert = client_cert
+ self.client_key = client_key
+ self.unix_socket = unix_socket
+ self.ca_path = ca_path
+ self.unredirected_headers = unredirected_headers
+ self.decompress = decompress
+ self.ciphers = ciphers
+ self.use_netrc = use_netrc
+ if isinstance(cookies, cookiejar.CookieJar):
+ self.cookies = cookies
+ else:
+ self.cookies = cookiejar.CookieJar()
+
+ def _fallback(self, value, fallback):
+ if value is None:
+ return fallback
+ return value
+
+ def open(self, method, url, data=None, headers=None, use_proxy=None,
+ force=None, last_mod_time=None, timeout=None, validate_certs=None,
+ url_username=None, url_password=None, http_agent=None,
+ force_basic_auth=None, follow_redirects=None,
+ client_cert=None, client_key=None, cookies=None, use_gssapi=False,
+ unix_socket=None, ca_path=None, unredirected_headers=None, decompress=None,
+ ciphers=None, use_netrc=None):
+ """
+ Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)
+
+ Does not require the module environment
+
+ Returns :class:`HTTPResponse` object.
+
+ :arg method: method for the request
+ :arg url: URL to request
+
+ :kwarg data: (optional) bytes, or file-like object to send
+ in the body of the request
+ :kwarg headers: (optional) Dictionary of HTTP Headers to send with the
+ request
+ :kwarg use_proxy: (optional) Boolean of whether or not to use proxy
+ :kwarg force: (optional) Boolean of whether or not to set `cache-control: no-cache` header
+ :kwarg last_mod_time: (optional) Datetime object to use when setting If-Modified-Since header
+ :kwarg timeout: (optional) How long to wait for the server to send
+ data before giving up, as a float
+        :kwarg validate_certs: (optional) Boolean that controls whether we verify
+ the server's TLS certificate
+ :kwarg url_username: (optional) String of the user to use when authenticating
+ :kwarg url_password: (optional) String of the password to use when authenticating
+ :kwarg http_agent: (optional) String of the User-Agent to use in the request
+ :kwarg force_basic_auth: (optional) Boolean determining if auth header should be sent in the initial request
+ :kwarg follow_redirects: (optional) String of urllib2, all/yes, safe, none to determine how redirects are
+ followed, see RedirectHandlerFactory for more information
+ :kwarg client_cert: (optional) PEM formatted certificate chain file to be used for SSL client authentication.
+ This file can also include the key as well, and if the key is included, client_key is not required
+ :kwarg client_key: (optional) PEM formatted file that contains your private key to be used for SSL client
+ authentication. If client_cert contains both the certificate and key, this option is not required
+ :kwarg cookies: (optional) CookieJar object to send with the
+ request
+        :kwarg use_gssapi: (optional) Use the GSSAPI handler for the request.
+ :kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing
+ connection to the provided url
+ :kwarg ca_path: (optional) String of file system path to CA cert bundle to use
+ :kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request
+ :kwarg decompress: (optional) Whether to attempt to decompress gzip content-encoded responses
+ :kwarg ciphers: (optional) List of ciphers to use
+ :kwarg use_netrc: (optional) Boolean determining whether to use credentials from ~/.netrc file
+ :returns: HTTPResponse. Added in Ansible 2.9
+ """
+
+ method = method.upper()
+
+ if headers is None:
+ headers = {}
+ elif not isinstance(headers, dict):
+ raise ValueError("headers must be a dict")
+ headers = dict(self.headers, **headers)
+
+ use_proxy = self._fallback(use_proxy, self.use_proxy)
+ force = self._fallback(force, self.force)
+ timeout = self._fallback(timeout, self.timeout)
+ validate_certs = self._fallback(validate_certs, self.validate_certs)
+ url_username = self._fallback(url_username, self.url_username)
+ url_password = self._fallback(url_password, self.url_password)
+ http_agent = self._fallback(http_agent, self.http_agent)
+ force_basic_auth = self._fallback(force_basic_auth, self.force_basic_auth)
+ follow_redirects = self._fallback(follow_redirects, self.follow_redirects)
+ client_cert = self._fallback(client_cert, self.client_cert)
+ client_key = self._fallback(client_key, self.client_key)
+ cookies = self._fallback(cookies, self.cookies)
+ unix_socket = self._fallback(unix_socket, self.unix_socket)
+ ca_path = self._fallback(ca_path, self.ca_path)
+ unredirected_headers = self._fallback(unredirected_headers, self.unredirected_headers)
+ decompress = self._fallback(decompress, self.decompress)
+ ciphers = self._fallback(ciphers, self.ciphers)
+ use_netrc = self._fallback(use_netrc, self.use_netrc)
+
+ handlers = []
+
+ if unix_socket:
+ handlers.append(UnixHTTPHandler(unix_socket))
+
+ parsed = generic_urlparse(urlparse(url))
+ if parsed.scheme != 'ftp':
+ username = url_username
+ password = url_password
+
+ if username:
+ netloc = parsed.netloc
+ elif '@' in parsed.netloc:
+ credentials, netloc = parsed.netloc.split('@', 1)
+ if ':' in credentials:
+ username, password = credentials.split(':', 1)
+ else:
+ username = credentials
+ password = ''
+
+ parsed_list = parsed.as_list()
+ parsed_list[1] = netloc
+
+ # reconstruct url without credentials
+ url = urlunparse(parsed_list)
+
+ if use_gssapi:
+ if HTTPGSSAPIAuthHandler:
+ handlers.append(HTTPGSSAPIAuthHandler(username, password))
+ else:
+ imp_err_msg = missing_required_lib('gssapi', reason='for use_gssapi=True',
+ url='https://pypi.org/project/gssapi/')
+ raise MissingModuleError(imp_err_msg, import_traceback=GSSAPI_IMP_ERR)
+
+ elif username and not force_basic_auth:
+ passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()
+
+ # this creates a password manager
+ passman.add_password(None, netloc, username, password)
+
+ # because we have put None at the start it will always
+ # use this username/password combination for urls
+            # for which the supplied netloc is a super-url
+ authhandler = urllib_request.HTTPBasicAuthHandler(passman)
+ digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman)
+
+ # create the AuthHandler
+ handlers.append(authhandler)
+ handlers.append(digest_authhandler)
+
+ elif username and force_basic_auth:
+ headers["Authorization"] = basic_auth_header(username, password)
+
+ elif use_netrc:
+ try:
+ rc = netrc.netrc(os.environ.get('NETRC'))
+ login = rc.authenticators(parsed.hostname)
+ except IOError:
+ login = None
+
+ if login:
+ username, _, password = login
+ if username and password:
+ headers["Authorization"] = basic_auth_header(username, password)
+
+ if not use_proxy:
+ proxyhandler = urllib_request.ProxyHandler({})
+ handlers.append(proxyhandler)
+
+ if not any((HAS_SSLCONTEXT, HAS_URLLIB3_PYOPENSSLCONTEXT)):
+ ssl_handler = maybe_add_ssl_handler(url, validate_certs, ca_path=ca_path, ciphers=ciphers)
+ if ssl_handler:
+ handlers.append(ssl_handler)
+ else:
+ tmp_ca_path, cadata, paths_checked = get_ca_certs(ca_path)
+ context = make_context(
+ cafile=tmp_ca_path,
+ cadata=cadata,
+ ciphers=ciphers,
+ validate_certs=validate_certs,
+ )
+ handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
+ client_key=client_key,
+ unix_socket=unix_socket,
+ context=context))
+
+ handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs, ca_path=ca_path, ciphers=ciphers))
+
+ # add some nicer cookie handling
+ if cookies is not None:
+ handlers.append(urllib_request.HTTPCookieProcessor(cookies))
+
+ opener = urllib_request.build_opener(*handlers)
+ urllib_request.install_opener(opener)
+
+ data = to_bytes(data, nonstring='passthru')
+ request = RequestWithMethod(url, method, data)
+
+ # add the custom agent header, to help prevent issues
+ # with sites that block the default urllib agent string
+ if http_agent:
+ request.add_header('User-agent', http_agent)
+
+ # Cache control
+ # Either we directly force a cache refresh
+ if force:
+ request.add_header('cache-control', 'no-cache')
+ # or we do it if the original is more recent than our copy
+ elif last_mod_time:
+ tstamp = rfc2822_date_string(last_mod_time.timetuple(), 'GMT')
+ request.add_header('If-Modified-Since', tstamp)
+
+ # user defined headers now, which may override things we've set above
+ unredirected_headers = [h.lower() for h in (unredirected_headers or [])]
+ for header in headers:
+ if header.lower() in unredirected_headers:
+ request.add_unredirected_header(header, headers[header])
+ else:
+ request.add_header(header, headers[header])
+
+ r = urllib_request.urlopen(request, None, timeout)
+ if decompress and r.headers.get('content-encoding', '').lower() == 'gzip':
+ fp = GzipDecodedReader(r.fp)
+ if PY3:
+ r.fp = fp
+ # Content-Length does not match gzip decoded length
+ # Prevent ``r.read`` from stopping at Content-Length
+ r.length = None
+ else:
+ # Py2 maps ``r.read`` to ``fp.read``, create new ``addinfourl``
+ # object to compensate
+ msg = r.msg
+ r = urllib_request.addinfourl(
+ fp,
+ r.info(),
+ r.geturl(),
+ r.getcode()
+ )
+ r.msg = msg
+ return r
+
+ def get(self, url, **kwargs):
+ r"""Sends a GET request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('GET', url, **kwargs)
+
+ def options(self, url, **kwargs):
+ r"""Sends a OPTIONS request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('OPTIONS', url, **kwargs)
+
+ def head(self, url, **kwargs):
+ r"""Sends a HEAD request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('HEAD', url, **kwargs)
+
+ def post(self, url, data=None, **kwargs):
+ r"""Sends a POST request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request.
+ :kwarg data: (optional) bytes, or file-like object to send in the body of the request.
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('POST', url, data=data, **kwargs)
+
+ def put(self, url, data=None, **kwargs):
+ r"""Sends a PUT request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request.
+ :kwarg data: (optional) bytes, or file-like object to send in the body of the request.
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('PUT', url, data=data, **kwargs)
+
+ def patch(self, url, data=None, **kwargs):
+ r"""Sends a PATCH request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request.
+ :kwarg data: (optional) bytes, or file-like object to send in the body of the request.
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('PATCH', url, data=data, **kwargs)
+
+ def delete(self, url, **kwargs):
+ r"""Sends a DELETE request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request
+        :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('DELETE', url, **kwargs)
+
+
+def open_url(url, data=None, headers=None, method=None, use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None,
+ force_basic_auth=False, follow_redirects='urllib2',
+ client_cert=None, client_key=None, cookies=None,
+ use_gssapi=False, unix_socket=None, ca_path=None,
+ unredirected_headers=None, decompress=True, ciphers=None, use_netrc=True):
+ '''
+ Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)
+
+ Does not require the module environment
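+
+    Example (a minimal sketch; the URL is illustrative)::
+
+        r = open_url('https://example.com/api', timeout=5)
+        print(r.read())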
+ '''
+ method = method or ('POST' if data else 'GET')
+ return Request().open(method, url, data=data, headers=headers, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth, follow_redirects=follow_redirects,
+ client_cert=client_cert, client_key=client_key, cookies=cookies,
+ use_gssapi=use_gssapi, unix_socket=unix_socket, ca_path=ca_path,
+ unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc)
+
+
+def prepare_multipart(fields):
+ """Takes a mapping, and prepares a multipart/form-data body
+
+ :arg fields: Mapping
+ :returns: tuple of (content_type, body) where ``content_type`` is
+ the ``multipart/form-data`` ``Content-Type`` header including
+ ``boundary`` and ``body`` is the prepared bytestring body
+
+ Payload content from a file will be base64 encoded and will include
+ the appropriate ``Content-Transfer-Encoding`` and ``Content-Type``
+ headers.
+
+ Example:
+ {
+ "file1": {
+ "filename": "/bin/true",
+ "mime_type": "application/octet-stream"
+ },
+ "file2": {
+ "content": "text based file content",
+ "filename": "fake.txt",
+ "mime_type": "text/plain",
+ },
+ "text_form_field": "value"
+ }
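+
+    A sketch of typical usage of the return value (the header name follows
+    standard HTTP usage)::
+
+        content_type, body = prepare_multipart(fields)
+        headers = {'Content-Type': content_type}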
+ """
+
+ if not isinstance(fields, Mapping):
+ raise TypeError(
+ 'Mapping is required, cannot be type %s' % fields.__class__.__name__
+ )
+
+ m = email.mime.multipart.MIMEMultipart('form-data')
+ for field, value in sorted(fields.items()):
+ if isinstance(value, string_types):
+ main_type = 'text'
+ sub_type = 'plain'
+ content = value
+ filename = None
+ elif isinstance(value, Mapping):
+ filename = value.get('filename')
+ content = value.get('content')
+ if not any((filename, content)):
+ raise ValueError('at least one of filename or content must be provided')
+
+ mime = value.get('mime_type')
+ if not mime:
+ try:
+ mime = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream'
+ except Exception:
+ mime = 'application/octet-stream'
+ main_type, sep, sub_type = mime.partition('/')
+ else:
+ raise TypeError(
+ 'value must be a string, or mapping, cannot be type %s' % value.__class__.__name__
+ )
+
+ if not content and filename:
+ with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f:
+ part = email.mime.application.MIMEApplication(f.read())
+ del part['Content-Type']
+ part.add_header('Content-Type', '%s/%s' % (main_type, sub_type))
+ else:
+ part = email.mime.nonmultipart.MIMENonMultipart(main_type, sub_type)
+ part.set_payload(to_bytes(content))
+
+ part.add_header('Content-Disposition', 'form-data')
+ del part['MIME-Version']
+ part.set_param(
+ 'name',
+ field,
+ header='Content-Disposition'
+ )
+ if filename:
+ part.set_param(
+ 'filename',
+ to_native(os.path.basename(filename)),
+ header='Content-Disposition'
+ )
+
+ m.attach(part)
+
+ if PY3:
+ # Ensure headers are not split over multiple lines
+ # The HTTP policy also uses CRLF by default
+ b_data = m.as_bytes(policy=email.policy.HTTP)
+ else:
+ # Py2
+ # We cannot just call ``as_string`` since it provides no way
+ # to specify ``maxheaderlen``
+ fp = cStringIO() # cStringIO seems to be required here
+ # Ensure headers are not split over multiple lines
+ g = email.generator.Generator(fp, maxheaderlen=0)
+ g.flatten(m)
+ # ``fix_eols`` switches from ``\n`` to ``\r\n``
+ b_data = email.utils.fix_eols(fp.getvalue())
+ del m
+
+ headers, sep, b_content = b_data.partition(b'\r\n\r\n')
+ del b_data
+
+ if PY3:
+ parser = email.parser.BytesHeaderParser().parsebytes
+ else:
+ # Py2
+ parser = email.parser.HeaderParser().parsestr
+
+ return (
+ parser(headers)['content-type'], # Message converts to native strings
+ b_content
+ )
+
+
+#
+# Module-related functions
+#
+
+
+def basic_auth_header(username, password):
+ """Takes a username and password and returns a byte string suitable for
+    use as the value of an Authorization header to do basic auth.
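+
+    For example, ``basic_auth_header('user', 'pass')`` returns
+    ``b'Basic dXNlcjpwYXNz'``.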
+ """
+ if password is None:
+ password = ''
+ return b"Basic %s" % base64.b64encode(to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict'))
+
+
+def url_argument_spec():
+ '''
+ Creates an argument spec that can be used with any module
+ that will be requesting content via urllib/urllib2
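+
+    A common pattern (a sketch; the extra ``dest`` option is illustrative)::
+
+        argument_spec = url_argument_spec()
+        argument_spec.update(dest=dict(type='path'))
+        module = AnsibleModule(argument_spec=argument_spec)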
+ '''
+ return dict(
+ url=dict(type='str'),
+ force=dict(type='bool', default=False),
+ http_agent=dict(type='str', default='ansible-httpget'),
+ use_proxy=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ url_username=dict(type='str'),
+ url_password=dict(type='str', no_log=True),
+ force_basic_auth=dict(type='bool', default=False),
+ client_cert=dict(type='path'),
+ client_key=dict(type='path'),
+ use_gssapi=dict(type='bool', default=False),
+ )
+
+
+def fetch_url(module, url, data=None, headers=None, method=None,
+ use_proxy=None, force=False, last_mod_time=None, timeout=10,
+ use_gssapi=False, unix_socket=None, ca_path=None, cookies=None, unredirected_headers=None,
+ decompress=True, ciphers=None, use_netrc=True):
+ """Sends a request via HTTP(S) or FTP (needs the module as parameter)
+
+    :arg module: The AnsibleModule (used to get username, password and other settings; see below).
+ :arg url: The url to use.
+
+ :kwarg data: The data to be sent (in case of POST/PUT).
+ :kwarg headers: A dict with the request headers.
+ :kwarg method: "POST", "PUT", etc.
+ :kwarg use_proxy: (optional) whether or not to use proxy (Default: True)
+ :kwarg boolean force: If True: Do not get a cached copy (Default: False)
+ :kwarg last_mod_time: Default: None
+ :kwarg int timeout: Default: 10
+ :kwarg boolean use_gssapi: Default: False
+ :kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing
+ connection to the provided url
+ :kwarg ca_path: (optional) String of file system path to CA cert bundle to use
+ :kwarg cookies: (optional) CookieJar object to send with the request
+ :kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request
+ :kwarg decompress: (optional) Whether to attempt to decompress gzip content-encoded responses
+    :kwarg ciphers: (optional) List of ciphers to use
+ :kwarg boolean use_netrc: (optional) If False: Ignores login and password in ~/.netrc file (Default: True)
+
+    :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
+        The **info** contains the 'status' and other meta data. When an HTTPError (status >= 400)
+        occurs, ``info['body']`` contains the error response data.
+
+    Example::
+
+        data = {...}
+        resp, info = fetch_url(module,
+                               "http://example.com",
+                               data=module.jsonify(data),
+                               headers={'Content-type': 'application/json'},
+                               method="POST")
+        status_code = info["status"]
+        body = resp.read()
+        if status_code >= 400:
+            body = info['body']
+ """
+
+ if not HAS_URLPARSE:
+ module.fail_json(msg='urlparse is not installed')
+
+ if not HAS_GZIP and decompress is True:
+ decompress = False
+ module.deprecate(
+ '%s. "decompress" has been automatically disabled to prevent a failure' % GzipDecodedReader.missing_gzip_error(),
+ version='2.16'
+ )
+
+ # ensure we use proper tempdir
+ old_tempdir = tempfile.tempdir
+ tempfile.tempdir = module.tmpdir
+
+ # Get validate_certs from the module params
+ validate_certs = module.params.get('validate_certs', True)
+
+ if use_proxy is None:
+ use_proxy = module.params.get('use_proxy', True)
+
+ username = module.params.get('url_username', '')
+ password = module.params.get('url_password', '')
+ http_agent = module.params.get('http_agent', 'ansible-httpget')
+ force_basic_auth = module.params.get('force_basic_auth', '')
+
+ follow_redirects = module.params.get('follow_redirects', 'urllib2')
+
+ client_cert = module.params.get('client_cert')
+ client_key = module.params.get('client_key')
+ use_gssapi = module.params.get('use_gssapi', use_gssapi)
+
+ if not isinstance(cookies, cookiejar.CookieJar):
+ cookies = cookiejar.LWPCookieJar()
+
+ r = None
+ info = dict(url=url, status=-1)
+ try:
+ r = open_url(url, data=data, headers=headers, method=method,
+ use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
+ validate_certs=validate_certs, url_username=username,
+ url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth,
+ follow_redirects=follow_redirects, client_cert=client_cert,
+ client_key=client_key, cookies=cookies, use_gssapi=use_gssapi,
+ unix_socket=unix_socket, ca_path=ca_path, unredirected_headers=unredirected_headers,
+ decompress=decompress, ciphers=ciphers, use_netrc=use_netrc)
+ # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable
+ info.update(dict((k.lower(), v) for k, v in r.info().items()))
+
+ # Don't be lossy, append header values for duplicate headers
+        # In Py2 there is nothing that needs to be done; py2 does this for us
+ if PY3:
+ temp_headers = {}
+ for name, value in r.headers.items():
+ # The same as above, lower case keys to match py2 behavior, and create more consistent results
+ name = name.lower()
+ if name in temp_headers:
+ temp_headers[name] = ', '.join((temp_headers[name], value))
+ else:
+ temp_headers[name] = value
+ info.update(temp_headers)
+
+ # parse the cookies into a nice dictionary
+ cookie_list = []
+ cookie_dict = dict()
+ # Python sorts cookies in order of most specific (ie. longest) path first. See ``CookieJar._cookie_attrs``
+ # Cookies with the same path are reversed from response order.
+ # This code makes no assumptions about that, and accepts the order given by python
+ for cookie in cookies:
+ cookie_dict[cookie.name] = cookie.value
+ cookie_list.append((cookie.name, cookie.value))
+ info['cookies_string'] = '; '.join('%s=%s' % c for c in cookie_list)
+
+ info['cookies'] = cookie_dict
+ # finally update the result with a message about the fetch
+ info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code))
+ except NoSSLError as e:
+ distribution = get_distribution()
+ if distribution is not None and distribution.lower() == 'redhat':
+ module.fail_json(msg='%s. You can also install python-ssl from EPEL' % to_native(e), **info)
+ else:
+ module.fail_json(msg='%s' % to_native(e), **info)
+ except (ConnectionError, ValueError) as e:
+ module.fail_json(msg=to_native(e), **info)
+ except MissingModuleError as e:
+ module.fail_json(msg=to_text(e), exception=e.import_traceback)
+ except urllib_error.HTTPError as e:
+ r = e
+ try:
+ if e.fp is None:
+ # Certain HTTPError objects may not have the ability to call ``.read()`` on Python 3
+ # This is not handled gracefully in Python 3, and instead an exception is raised from
+ # tempfile, due to ``urllib.response.addinfourl`` not being initialized
+ raise AttributeError
+ body = e.read()
+ except AttributeError:
+ body = ''
+ else:
+ e.close()
+
+ # Try to add exception info to the output but don't fail if we can't
+ try:
+ # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable
+ info.update(dict((k.lower(), v) for k, v in e.info().items()))
+ except Exception:
+ pass
+
+ info.update({'msg': to_native(e), 'body': body, 'status': e.code})
+
+ except urllib_error.URLError as e:
+ code = int(getattr(e, 'code', -1))
+ info.update(dict(msg="Request failed: %s" % to_native(e), status=code))
+ except socket.error as e:
+ info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1))
+ except httplib.BadStatusLine as e:
+ info.update(dict(msg="Connection failure: connection was closed before a valid response was received: %s" % to_native(e.line), status=-1))
+ except Exception as e:
+ info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1),
+ exception=traceback.format_exc())
+ finally:
+ tempfile.tempdir = old_tempdir
+
+ return r, info
+
+
+def _suffixes(name):
+ """A list of the final component's suffixes, if any."""
+ if name.endswith('.'):
+ return []
+ name = name.lstrip('.')
+ return ['.' + s for s in name.split('.')[1:]]
+
+
+def _split_multiext(name, min=3, max=4, count=2):
+ """Split a multi-part extension from a file name.
+
+ Returns '([name minus extension], extension)'.
+
+ Define the valid extension length (including the '.') with 'min' and 'max',
+ 'count' sets the number of extensions, counting from the end, to evaluate.
+ Evaluation stops on the first file extension that is outside the min and max range.
+
+ If no valid extensions are found, the original ``name`` is returned
+ and ``extension`` is empty.
+
+ :arg name: File name or path.
+ :kwarg min: Minimum length of a valid file extension.
+ :kwarg max: Maximum length of a valid file extension.
+ :kwarg count: Number of suffixes from the end to evaluate.
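+
+    For example, ``_split_multiext('archive.tar.gz')`` returns
+    ``('archive', '.tar.gz')`` and ``_split_multiext('notes.txt')``
+    returns ``('notes', '.txt')``.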
+
+ """
+ extension = ''
+ for i, sfx in enumerate(reversed(_suffixes(name))):
+ if i >= count:
+ break
+
+ if min <= len(sfx) <= max:
+ extension = '%s%s' % (sfx, extension)
+            # remove the suffix itself; rstrip() would strip a trailing character set instead
+            name = name[:-len(sfx)]
+ else:
+ # Stop on the first invalid extension
+ break
+
+ return name, extension
+
+
+def fetch_file(module, url, data=None, headers=None, method=None,
+ use_proxy=True, force=False, last_mod_time=None, timeout=10,
+ unredirected_headers=None, decompress=True, ciphers=None):
+ '''Download and save a file via HTTP(S) or FTP (needs the module as parameter).
+ This is basically a wrapper around fetch_url().
+
+    :arg module: The AnsibleModule (used to get username, password and other settings; see below).
+ :arg url: The url to use.
+
+ :kwarg data: The data to be sent (in case of POST/PUT).
+ :kwarg headers: A dict with the request headers.
+ :kwarg method: "POST", "PUT", etc.
+ :kwarg boolean use_proxy: Default: True
+ :kwarg boolean force: If True: Do not get a cached copy (Default: False)
+ :kwarg last_mod_time: Default: None
+ :kwarg int timeout: Default: 10
+ :kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request
+ :kwarg decompress: (optional) Whether to attempt to decompress gzip content-encoded responses
+ :kwarg ciphers: (optional) List of ciphers to use
+
+ :returns: A string, the path to the downloaded file.
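+
+    Example (a sketch; the URL is illustrative)::
+
+        path = fetch_file(module, 'https://example.com/archive.tar.gz')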
+ '''
+ # download file
+ bufsize = 65536
+ parts = urlparse(url)
+ file_prefix, file_ext = _split_multiext(os.path.basename(parts.path), count=2)
+ fetch_temp_file = tempfile.NamedTemporaryFile(dir=module.tmpdir, prefix=file_prefix, suffix=file_ext, delete=False)
+ module.add_cleanup_file(fetch_temp_file.name)
+ try:
+ rsp, info = fetch_url(module, url, data, headers, method, use_proxy, force, last_mod_time, timeout,
+ unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers)
+ if not rsp:
+ module.fail_json(msg="Failure downloading %s, %s" % (url, info['msg']))
+ data = rsp.read(bufsize)
+ while data:
+ fetch_temp_file.write(data)
+ data = rsp.read(bufsize)
+ fetch_temp_file.close()
+ except Exception as e:
+ module.fail_json(msg="Failure downloading %s, %s" % (url, to_native(e)))
+ return fetch_temp_file.name
diff --git a/lib/ansible/module_utils/yumdnf.py b/lib/ansible/module_utils/yumdnf.py
new file mode 100644
index 0000000..e265a2d
--- /dev/null
+++ b/lib/ansible/module_utils/yumdnf.py
@@ -0,0 +1,182 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+# Contributing Authors:
+# - Ansible Core Team
+# - Eduard Snesarev (@verm666)
+# - Berend De Schouwer (@berenddeschouwer)
+# - Abhijeet Kasurde (@Akasurde)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+import glob
+import tempfile
+from abc import ABCMeta, abstractmethod
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import with_metaclass
+
+yumdnf_argument_spec = dict(
+ argument_spec=dict(
+ allow_downgrade=dict(type='bool', default=False),
+ autoremove=dict(type='bool', default=False),
+ bugfix=dict(required=False, type='bool', default=False),
+ cacheonly=dict(type='bool', default=False),
+ conf_file=dict(type='str'),
+ disable_excludes=dict(type='str', default=None),
+ disable_gpg_check=dict(type='bool', default=False),
+ disable_plugin=dict(type='list', elements='str', default=[]),
+ disablerepo=dict(type='list', elements='str', default=[]),
+ download_only=dict(type='bool', default=False),
+ download_dir=dict(type='str', default=None),
+ enable_plugin=dict(type='list', elements='str', default=[]),
+ enablerepo=dict(type='list', elements='str', default=[]),
+ exclude=dict(type='list', elements='str', default=[]),
+ installroot=dict(type='str', default="/"),
+ install_repoquery=dict(type='bool', default=True),
+ install_weak_deps=dict(type='bool', default=True),
+ list=dict(type='str'),
+ name=dict(type='list', elements='str', aliases=['pkg'], default=[]),
+ releasever=dict(default=None),
+ security=dict(type='bool', default=False),
+ skip_broken=dict(type='bool', default=False),
+ # removed==absent, installed==present, these are accepted as aliases
+ state=dict(type='str', default=None, choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False, aliases=['expire-cache']),
+ update_only=dict(required=False, default="no", type='bool'),
+ validate_certs=dict(type='bool', default=True),
+ sslverify=dict(type='bool', default=True),
+ lock_timeout=dict(type='int', default=30),
+ ),
+ required_one_of=[['name', 'list', 'update_cache']],
+ mutually_exclusive=[['name', 'list']],
+ supports_check_mode=True,
+)
+
+
+class YumDnf(with_metaclass(ABCMeta, object)): # type: ignore[misc]
+ """
+    Abstract class that handles the population of instance variables that should
+    be identical between the YUM and DNF modules because of their feature parity
+    and shared argument spec.
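+
+    Concrete subclasses are expected to implement ``is_lockfile_pid_valid()``
+    and ``run()``, and typically redefine ``self.lockfile``.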
+ """
+
+ def __init__(self, module):
+
+ self.module = module
+
+ self.allow_downgrade = self.module.params['allow_downgrade']
+ self.autoremove = self.module.params['autoremove']
+ self.bugfix = self.module.params['bugfix']
+ self.cacheonly = self.module.params['cacheonly']
+ self.conf_file = self.module.params['conf_file']
+ self.disable_excludes = self.module.params['disable_excludes']
+ self.disable_gpg_check = self.module.params['disable_gpg_check']
+ self.disable_plugin = self.module.params['disable_plugin']
+ self.disablerepo = self.module.params.get('disablerepo', [])
+ self.download_only = self.module.params['download_only']
+ self.download_dir = self.module.params['download_dir']
+ self.enable_plugin = self.module.params['enable_plugin']
+ self.enablerepo = self.module.params.get('enablerepo', [])
+ self.exclude = self.module.params['exclude']
+ self.installroot = self.module.params['installroot']
+ self.install_repoquery = self.module.params['install_repoquery']
+ self.install_weak_deps = self.module.params['install_weak_deps']
+ self.list = self.module.params['list']
+ self.names = [p.strip() for p in self.module.params['name']]
+ self.releasever = self.module.params['releasever']
+ self.security = self.module.params['security']
+ self.skip_broken = self.module.params['skip_broken']
+ self.state = self.module.params['state']
+ self.update_only = self.module.params['update_only']
+ self.update_cache = self.module.params['update_cache']
+ self.validate_certs = self.module.params['validate_certs']
+ self.sslverify = self.module.params['sslverify']
+ self.lock_timeout = self.module.params['lock_timeout']
+
+ # It's possible someone passed a comma separated string since it used
+ # to be a string type, so we should handle that
+ self.names = self.listify_comma_sep_strings_in_list(self.names)
+ self.disablerepo = self.listify_comma_sep_strings_in_list(self.disablerepo)
+ self.enablerepo = self.listify_comma_sep_strings_in_list(self.enablerepo)
+ self.exclude = self.listify_comma_sep_strings_in_list(self.exclude)
+
+ # Fail if someone passed a space separated string
+ # https://github.com/ansible/ansible/issues/46301
+ for name in self.names:
+ if ' ' in name and not any(spec in name for spec in ['@', '>', '<', '=']):
+ module.fail_json(
+ msg='It appears that a space separated string of packages was passed in '
+ 'as an argument. To operate on several packages, pass a comma separated '
+ 'string of packages or a list of packages.'
+ )
+
+ # Sanity checking for autoremove
+ if self.state is None:
+ if self.autoremove:
+ self.state = "absent"
+ else:
+ self.state = "present"
+
+ if self.autoremove and (self.state != "absent"):
+ self.module.fail_json(
+ msg="Autoremove should be used alone or with state=absent",
+ results=[],
+ )
+
+ # This should really be redefined by both the yum and dnf module but a
+ # default isn't a bad idea
+ self.lockfile = '/var/run/yum.pid'
+
+ @abstractmethod
+ def is_lockfile_pid_valid(self):
+ return
+
+ def _is_lockfile_present(self):
+ return (os.path.isfile(self.lockfile) or glob.glob(self.lockfile)) and self.is_lockfile_pid_valid()
+
+ def wait_for_lock(self):
+ '''Poll until the lock is removed if timeout is a positive number'''
+
+ if not self._is_lockfile_present():
+ return
+
+ if self.lock_timeout > 0:
+ for iteration in range(0, self.lock_timeout):
+ time.sleep(1)
+ if not self._is_lockfile_present():
+ return
+
+ self.module.fail_json(msg='{0} lockfile is held by another process'.format(self.pkg_mgr_name))
+
+ def listify_comma_sep_strings_in_list(self, some_list):
+ """
+        Accept a list of strings, find any entries that are comma separated,
+        remove them from the list, and append their comma separated elements
+        to the original list.
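+
+        For example, ``['a,b', 'c']`` becomes ``['c', 'a', 'b']``: the comma
+        separated entry is removed and its elements are appended at the end.
+        An effectively empty result (``[""]``) is normalized to ``[]``.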
+ """
+ new_list = []
+ remove_from_original_list = []
+ for element in some_list:
+ if ',' in element:
+ remove_from_original_list.append(element)
+ new_list.extend([e.strip() for e in element.split(',')])
+
+ for element in remove_from_original_list:
+ some_list.remove(element)
+
+ some_list.extend(new_list)
+
+ if some_list == [""]:
+ return []
+
+ return some_list
+
+ @abstractmethod
+ def run(self):
+ raise NotImplementedError
diff --git a/lib/ansible/modules/__init__.py b/lib/ansible/modules/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/modules/__init__.py
diff --git a/lib/ansible/modules/_include.py b/lib/ansible/modules/_include.py
new file mode 100644
index 0000000..60deb94
--- /dev/null
+++ b/lib/ansible/modules/_include.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: include
+short_description: Include a task list
+description:
+ - Includes a file with a list of tasks to be executed in the current playbook.
+ - Lists of tasks can only be included where tasks
+ normally run (in play).
+ - Before Ansible 2.0, all includes were 'static' and were executed when the play was compiled.
+ - Static includes are not subject to most directives. For example, loops or conditionals are applied instead to each
+ inherited task.
+ - Since Ansible 2.0, task includes are dynamic and behave more like real tasks. This means they can be looped,
+ skipped and use variables from any source. Ansible tries to auto detect this, but you can use the C(static)
+ directive (which was added in Ansible 2.1) to bypass autodetection.
+ - This module is also supported for Windows targets.
+version_added: "0.6"
+deprecated:
+  why: it has too many conflicting behaviours depending on keyword combinations and it was unclear how it should behave in each case.
+    New actions were developed that are specific about each case and the related behaviours.
+ alternative: include_tasks, import_tasks, import_playbook
+ removed_in: "2.16"
+ removed_from_collection: 'ansible.builtin'
+options:
+ free-form:
+ description:
+ - This module allows you to specify the name of the file directly without any other options.
+notes:
+ - This is a core feature of Ansible, rather than a module, and cannot be overridden like a module.
+  - Include has some unintuitive behaviours depending on whether it is running in a static or dynamic, in play or in playbook, context.
+    In an effort to clarify behaviours we are moving to a new set of modules (M(ansible.builtin.include_tasks),
+    M(ansible.builtin.include_role), M(ansible.builtin.import_playbook), M(ansible.builtin.import_tasks))
+    that have well established and clear behaviours.
+  - This module no longer supports including plays. Use M(ansible.builtin.import_playbook) instead.
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Include task list in play
+ ansible.builtin.include: stuff.yaml
+
+ - ansible.builtin.debug:
+ msg: task10
+
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Include task list in play only if the condition is true
+ ansible.builtin.include: "{{ hostvar }}.yaml"
+ static: no
+ when: hostvar is defined
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/add_host.py b/lib/ansible/modules/add_host.py
new file mode 100644
index 0000000..b446df5
--- /dev/null
+++ b/lib/ansible/modules/add_host.py
@@ -0,0 +1,115 @@
+# -*- mode: python -*-
+
+# Copyright: (c) 2012, Seth Vidal (@skvidal)
+# Copyright: Ansible Team
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: add_host
+short_description: Add a host (and alternatively a group) to the ansible-playbook in-memory inventory
+description:
+- Use variables to create new hosts and groups in inventory for use in later plays of the same playbook.
+- Takes variables so you can define the new hosts more fully.
+- This module is also supported for Windows targets.
+version_added: "0.9"
+options:
+ name:
+ description:
+ - The hostname/ip of the host to add to the inventory, can include a colon and a port number.
+ type: str
+ required: true
+ aliases: [ host, hostname ]
+ groups:
+ description:
+ - The groups to add the hostname to.
+ type: list
+ elements: str
+ aliases: [ group, groupname ]
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+attributes:
+ action:
+ support: full
+ core:
+ details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
+ support: partial
+ become:
+ support: none
+ bypass_host_loop:
+ support: full
+ bypass_task_loop:
+ support: none
+ check_mode:
+ details: While this makes no changes to target systems the 'in memory' inventory will still be altered
+ support: partial
+ connection:
+ support: none
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+notes:
+- The alias C(host) of the parameter C(name) is only available on Ansible 2.4 and newer.
+- Since Ansible 2.4, the C(inventory_dir) variable is now set to C(None) instead of the 'global inventory source',
+ because you can now have multiple sources. An example was added that shows how to partially restore the previous behaviour.
+- Though this module does not change the remote host, we do provide 'changed' status as it can be useful for those trying to track inventory changes.
+- The hosts added will not bypass the C(--limit) from the command line, so both of those need to be in agreement to make them available as play targets.
+ They are still available from hostvars and for delegation as a normal part of the inventory.
+seealso:
+- module: ansible.builtin.group_by
+author:
+- Ansible Core Team
+- Seth Vidal (@skvidal)
+'''
+
+EXAMPLES = r'''
+- name: Add host to group 'just_created' with variable foo=42
+ ansible.builtin.add_host:
+ name: '{{ ip_from_ec2 }}'
+ groups: just_created
+ foo: 42
+
+- name: Add host to multiple groups
+ ansible.builtin.add_host:
+ hostname: '{{ new_ip }}'
+ groups:
+ - group1
+ - group2
+
+- name: Add a host with a non-standard port local to your machines
+ ansible.builtin.add_host:
+ name: '{{ new_ip }}:{{ new_port }}'
+
+- name: Add a host alias that we reach through a tunnel (Ansible 1.9 and older)
+ ansible.builtin.add_host:
+ hostname: '{{ new_ip }}'
+ ansible_ssh_host: '{{ inventory_hostname }}'
+ ansible_ssh_port: '{{ new_port }}'
+
+- name: Add a host alias that we reach through a tunnel (Ansible 2.0 and newer)
+ ansible.builtin.add_host:
+ hostname: '{{ new_ip }}'
+ ansible_host: '{{ inventory_hostname }}'
+ ansible_port: '{{ new_port }}'
+
+- name: Ensure inventory vars are set to the same value as the inventory_hostname has (close to pre Ansible 2.4 behaviour)
+ ansible.builtin.add_host:
+ hostname: charlie
+ inventory_dir: '{{ inventory_dir }}'
+
+- name: Add all hosts running this playbook to the done group
+ ansible.builtin.add_host:
+ name: '{{ item }}'
+ groups: done
+ loop: "{{ ansible_play_hosts }}"
+'''
diff --git a/lib/ansible/modules/apt.py b/lib/ansible/modules/apt.py
new file mode 100644
index 0000000..1b7c5d2
--- /dev/null
+++ b/lib/ansible/modules/apt.py
@@ -0,0 +1,1487 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Flowroute LLC
+# Written by Matthew Williams <matthew@flowroute.com>
+# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apt
+short_description: Manages apt packages
+description:
+ - Manages I(apt) packages (such as for Debian/Ubuntu).
+version_added: "0.0.2"
+options:
+ name:
+ description:
+ - A list of package names, like C(foo), or package specifier with version, like C(foo=1.0) or C(foo>=1.0).
+ Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported.
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+      - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies
+        are installed. C(fixed) attempts to correct a system with broken dependencies in place.
+ type: str
+ default: present
+ choices: [ absent, build-dep, latest, present, fixed ]
+ update_cache:
+ description:
+ - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
+ - Default is not to update the cache.
+ aliases: [ update-cache ]
+ type: bool
+ update_cache_retries:
+ description:
+      - Number of retries if the cache update fails. Also see I(update_cache_retry_max_delay).
+ type: int
+ default: 5
+ version_added: '2.10'
+ update_cache_retry_max_delay:
+ description:
+ - Use an exponential backoff delay for each retry (see I(update_cache_retries)) up to this max delay in seconds.
+ type: int
+ default: 12
+ version_added: '2.10'
+ cache_valid_time:
+ description:
+ - Update the apt cache if it is older than the I(cache_valid_time). This option is set in seconds.
+ - As of Ansible 2.4, if explicitly set, this sets I(update_cache=yes).
+ type: int
+ default: 0
+ purge:
+ description:
+ - Will force purging of configuration files if the module state is set to I(absent).
+ type: bool
+ default: 'no'
+ default_release:
+ description:
+      - Corresponds to the C(-t) option for I(apt) and sets pin priorities.
+ aliases: [ default-release ]
+ type: str
+ install_recommends:
+ description:
+ - Corresponds to the C(--no-install-recommends) option for I(apt). C(true) installs recommended packages. C(false) does not install
+ recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
+ aliases: [ install-recommends ]
+ type: bool
+ force:
+ description:
+      - 'Corresponds to the C(--force-yes) option for I(apt-get) and implies C(allow_unauthenticated: yes) and C(allow_downgrade: yes)'
+ - "This option will disable checking both the packages' signatures and the certificates of the
+ web servers they are downloaded from."
+ - 'This option *is not* the equivalent of passing the C(-f) flag to I(apt-get) on the command line'
+ - '**This is a destructive operation with the potential to destroy your system, and it should almost never be used.**
+ Please also see C(man apt-get) for more information.'
+ type: bool
+ default: 'no'
+ clean:
+ description:
+ - Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but
+ the lock file from /var/cache/apt/archives/ and /var/cache/apt/archives/partial/.
+ - Can be run as part of the package installation (clean runs before install) or as a separate step.
+ type: bool
+ default: 'no'
+ version_added: "2.13"
+ allow_unauthenticated:
+ description:
+ - Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup.
+ - 'C(allow_unauthenticated) is only supported with state: I(install)/I(present)'
+ aliases: [ allow-unauthenticated ]
+ type: bool
+ default: 'no'
+ version_added: "2.1"
+ allow_downgrade:
+ description:
+ - Corresponds to the C(--allow-downgrades) option for I(apt).
+ - This option enables the named package and version to replace an already installed higher version of that package.
+ - Note that setting I(allow_downgrade=true) can make this module behave in a non-idempotent way.
+ - (The task could end up with a set of packages that does not match the complete list of specified packages to install).
+ aliases: [ allow-downgrade, allow_downgrades, allow-downgrades ]
+ type: bool
+ default: 'no'
+ version_added: "2.12"
+ allow_change_held_packages:
+ description:
+ - Allows changing the version of a package which is on the apt hold list
+ type: bool
+ default: 'no'
+ version_added: '2.13'
+ upgrade:
+ description:
+ - If yes or safe, performs an aptitude safe-upgrade.
+ - If full, performs an aptitude full-upgrade.
+ - If dist, performs an apt-get dist-upgrade.
+ - 'Note: This does not upgrade a specific package, use state=latest for that.'
+ - 'Note: Since 2.4, apt-get is used as a fall-back if aptitude is not present.'
+ version_added: "1.1"
+ choices: [ dist, full, 'no', safe, 'yes' ]
+ default: 'no'
+ type: str
+ dpkg_options:
+ description:
+ - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
+      - Options should be supplied as a comma separated list.
+ default: force-confdef,force-confold
+ type: str
+ deb:
+ description:
+ - Path to a .deb package on the remote machine.
+      - If C(://) is in the path, Ansible will attempt to download the deb before installing it. (Version added 2.1)
+ - Requires the C(xz-utils) package to extract the control file of the deb package to install.
+ type: path
+ required: false
+ version_added: "1.6"
+ autoremove:
+ description:
+ - If C(true), remove unused dependency packages for all module states except I(build-dep). It can also be used as the only option.
+      - Prior to version 2.4, C(autoclean) was also an alias for C(autoremove); now it is its own separate command. See documentation for further information.
+ type: bool
+ default: 'no'
+ version_added: "2.1"
+ autoclean:
+ description:
+ - If C(true), cleans the local repository of retrieved package files that can no longer be downloaded.
+ type: bool
+ default: 'no'
+ version_added: "2.4"
+ policy_rc_d:
+ description:
+ - Force the exit code of /usr/sbin/policy-rc.d.
+ - For example, if I(policy_rc_d=101) the installed package will not trigger a service start.
+ - If /usr/sbin/policy-rc.d already exists, it is backed up and restored after the package installation.
+ - If C(null), the /usr/sbin/policy-rc.d isn't created/changed.
+ type: int
+ default: null
+ version_added: "2.8"
+ only_upgrade:
+ description:
+ - Only upgrade a package if it is already installed.
+ type: bool
+ default: 'no'
+ version_added: "2.1"
+ fail_on_autoremove:
+ description:
+ - 'Corresponds to the C(--no-remove) option for C(apt).'
+ - 'If C(true), it is ensured that no packages will be removed or the task will fail.'
+      - 'C(fail_on_autoremove) is supported with all states except C(absent)'
+ type: bool
+ default: 'no'
+ version_added: "2.11"
+ force_apt_get:
+ description:
+ - Force usage of apt-get instead of aptitude
+ type: bool
+ default: 'no'
+ version_added: "2.4"
+ lock_timeout:
+ description:
+      - How many seconds this action will wait to acquire a lock on the apt db.
+      - Sometimes there is a transitory lock, and this will retry at least until the timeout is hit.
+ type: int
+ default: 60
+ version_added: "2.12"
+requirements:
+ - python-apt (python 2)
+ - python3-apt (python 3)
+ - aptitude (before 2.4)
+author: "Matthew Williams (@mgwilliams)"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: debian
+notes:
+  - Three of the upgrade modes (C(full), C(safe) and its alias C(true)) required C(aptitude) up to 2.3; since 2.4, C(apt-get) is used as a fall-back.
+  - In most cases, packages installed with apt will start newly installed services by default. Most distributions have mechanisms to avoid this.
+    For example, when installing Postgresql-9.5 in Debian 9, creating an executable shell script (/usr/sbin/policy-rc.d) that exits with
+    a return code of 101 will stop Postgresql 9.5 from starting up after install. Remove the file or remove its execute permission afterwards.
+  - The apt-get commandline supports implicit regex matches here but we do not, because it can let typos through more easily
+    (if you typo C(foo) as C(fo), apt-get would install packages that have "fo" in their name with a warning and a prompt for the user;
+    since we don't have warnings and prompts before installing, we disallow this. Use an explicit fnmatch pattern if you want wildcarding).
+ - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the I(name) option.
+ - When C(default_release) is used, an implicit priority of 990 is used. This is the same behavior as C(apt-get -t).
+ - When an exact version is specified, an implicit priority of 1001 is used.
+'''
+
+EXAMPLES = '''
+- name: Install apache httpd (state=present is optional)
+ ansible.builtin.apt:
+ name: apache2
+ state: present
+
+- name: Update repositories cache and install "foo" package
+ ansible.builtin.apt:
+ name: foo
+ update_cache: yes
+
+- name: Remove "foo" package
+ ansible.builtin.apt:
+ name: foo
+ state: absent
+
+- name: Install the package "foo"
+ ansible.builtin.apt:
+ name: foo
+
+- name: Install a list of packages
+ ansible.builtin.apt:
+ pkg:
+ - foo
+ - foo-tools
+
+- name: Install the version '1.00' of package "foo"
+ ansible.builtin.apt:
+ name: foo=1.00
+
+- name: Update the repository cache and update package "nginx" to latest version using default release squeeze-backport
+ ansible.builtin.apt:
+ name: nginx
+ state: latest
+ default_release: squeeze-backports
+ update_cache: yes
+
+- name: Install the version '1.18.0' of package "nginx" and allow potential downgrades
+ ansible.builtin.apt:
+ name: nginx=1.18.0
+ state: present
+ allow_downgrade: yes
+
+- name: Install zfsutils-linux with ensuring conflicted packages (e.g. zfs-fuse) will not be removed.
+ ansible.builtin.apt:
+ name: zfsutils-linux
+ state: latest
+ fail_on_autoremove: yes
+
+- name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
+ ansible.builtin.apt:
+ name: openjdk-6-jdk
+ state: latest
+ install_recommends: no
+
+- name: Update all packages to their latest version
+ ansible.builtin.apt:
+ name: "*"
+ state: latest
+
+- name: Upgrade the OS (apt-get dist-upgrade)
+ ansible.builtin.apt:
+ upgrade: dist
+
+- name: Run the equivalent of "apt-get update" as a separate step
+ ansible.builtin.apt:
+ update_cache: yes
+
+- name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago
+ ansible.builtin.apt:
+ update_cache: yes
+ cache_valid_time: 3600
+
+- name: Pass options to dpkg on run
+ ansible.builtin.apt:
+ upgrade: dist
+ update_cache: yes
+ dpkg_options: 'force-confold,force-confdef'
+
+- name: Install a .deb package
+ ansible.builtin.apt:
+ deb: /tmp/mypackage.deb
+
+- name: Install the build dependencies for package "foo"
+ ansible.builtin.apt:
+ pkg: foo
+ state: build-dep
+
+- name: Install a .deb package from the internet
+ ansible.builtin.apt:
+ deb: https://example.com/python-ppq_0.1-1_all.deb
+
+- name: Remove useless packages from the cache
+ ansible.builtin.apt:
+ autoclean: yes
+
+- name: Remove dependencies that are no longer required
+ ansible.builtin.apt:
+ autoremove: yes
+
+- name: Run the equivalent of "apt-get clean" as a separate step
+ apt:
+ clean: yes
+'''
+
+RETURN = '''
+cache_updated:
+  description: whether or not the cache was updated
+ returned: success, in some cases
+ type: bool
+ sample: True
+cache_update_time:
+ description: time of the last cache update (0 if unknown)
+ returned: success, in some cases
+ type: int
+ sample: 1425828348000
+stdout:
+ description: output from apt
+ returned: success, when needed
+ type: str
+ sample: |-
+ Reading package lists...
+ Building dependency tree...
+ Reading state information...
+ The following extra packages will be installed:
+ apache2-bin ...
+stderr:
+ description: error output from apt
+ returned: success, when needed
+ type: str
+ sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..."
+''' # NOQA
+
+# added to stave off future warnings about apt api
+import warnings
+warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
+
+import datetime
+import fnmatch
+import itertools
+import os
+import random
+import re
+import shutil
+import sys
+import tempfile
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.six import PY3, string_types
+from ansible.module_utils.urls import fetch_file
+
+DPKG_OPTIONS = 'force-confdef,force-confold'
+APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
+APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed"
+APT_LISTS_PATH = "/var/lib/apt/lists"
+APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
+APT_MARK_INVALID_OP = 'Invalid operation'
+APT_MARK_INVALID_OP_DEB6 = 'Usage: apt-mark [options] {markauto|unmarkauto} packages'
+
+CLEAN_OP_CHANGED_STR = dict(
+ autoremove='The following packages will be REMOVED',
+ # "Del python3-q 2.4-1 [24 kB]"
+ autoclean='Del ',
+)
+
+
+HAS_PYTHON_APT = False
+try:
+ import apt
+ import apt.debfile
+ import apt_pkg
+ HAS_PYTHON_APT = True
+except ImportError:
+ apt = apt_pkg = None
+
+
+class PolicyRcD(object):
+ """
+ This class is a context manager for the /usr/sbin/policy-rc.d file.
+    It allows the user to prevent dpkg from starting the corresponding service when installing
+    a package.
+ https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
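+
+    Usage (a sketch)::
+
+        with PolicyRcD(module):
+            # run apt-get/dpkg here; services will not be auto-started
+            ...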
+ """
+
+ def __init__(self, module):
+ # we need the module for later use (eg. fail_json)
+ self.m = module
+
+ # if policy_rc_d is null then we don't need to modify policy-rc.d
+ if self.m.params['policy_rc_d'] is None:
+ return
+
+ # if the /usr/sbin/policy-rc.d already exists
+ # we will back it up during package installation
+ # then restore it
+ if os.path.exists('/usr/sbin/policy-rc.d'):
+ self.backup_dir = tempfile.mkdtemp(prefix="ansible")
+ else:
+ self.backup_dir = None
+
+ def __enter__(self):
+ """
+ This method will be called when we enter the context, before we call `apt-get …`
+ """
+
+ # if policy_rc_d is null then we don't need to modify policy-rc.d
+ if self.m.params['policy_rc_d'] is None:
+ return
+
+ # if the /usr/sbin/policy-rc.d already exists we back it up
+ if self.backup_dir:
+ try:
+ shutil.move('/usr/sbin/policy-rc.d', self.backup_dir)
+ except Exception:
+ self.m.fail_json(msg="Fail to move /usr/sbin/policy-rc.d to %s" % self.backup_dir)
+
+ # we write /usr/sbin/policy-rc.d so it always exits with code policy_rc_d
+ try:
+ with open('/usr/sbin/policy-rc.d', 'w') as policy_rc_d:
+ policy_rc_d.write('#!/bin/sh\nexit %d\n' % self.m.params['policy_rc_d'])
+
+ os.chmod('/usr/sbin/policy-rc.d', 0o0755)
+ except Exception:
+ self.m.fail_json(msg="Failed to create or chmod /usr/sbin/policy-rc.d")
+
+ def __exit__(self, type, value, traceback):
+ """
+        This method will be called when we exit the context, after the package operation has completed
+ """
+
+ # if policy_rc_d is null then we don't need to modify policy-rc.d
+ if self.m.params['policy_rc_d'] is None:
+ return
+
+ if self.backup_dir:
+ # if /usr/sbin/policy-rc.d already exists before the call to __enter__
+ # we restore it (from the backup done in __enter__)
+ try:
+ shutil.move(os.path.join(self.backup_dir, 'policy-rc.d'),
+ '/usr/sbin/policy-rc.d')
+ os.rmdir(self.backup_dir)
+ except Exception:
+ self.m.fail_json(msg="Fail to move back %s to /usr/sbin/policy-rc.d"
+ % os.path.join(self.backup_dir, 'policy-rc.d'))
+ else:
+ # if there wasn't a /usr/sbin/policy-rc.d file before the call to __enter__
+ # we just remove the file
+ try:
+ os.remove('/usr/sbin/policy-rc.d')
+ except Exception:
+ self.m.fail_json(msg="Fail to remove /usr/sbin/policy-rc.d (after package manipulation)")
+
+
+def package_split(pkgspec):
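+    # For example: 'foo>=1.0' -> ['foo', '>=', '1.0'], while 'foo' -> ('foo', None, None)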
+ parts = re.split(r'(>?=)', pkgspec, 1)
+ if len(parts) > 1:
+ return parts
+ return parts[0], None, None
+
+
+def package_version_compare(version, other_version):
+ try:
+ return apt_pkg.version_compare(version, other_version)
+ except AttributeError:
+ return apt_pkg.VersionCompare(version, other_version)
+
+
+def package_best_match(pkgname, version_cmp, version, release, cache):
+ policy = apt_pkg.Policy(cache)
+
+ policy.read_pinfile(apt_pkg.config.find_file("Dir::Etc::preferences"))
+ policy.read_pindir(apt_pkg.config.find_file("Dir::Etc::preferencesparts"))
+
+ if release:
+ # 990 is the priority used in `apt-get -t`
+ policy.create_pin('Release', pkgname, release, 990)
+ if version_cmp == "=":
+ # Installing a specific version from command line overrides all pinning
+        # We don't mimic this exactly, but instead set a priority which is higher than all APT built-in pin priorities.
+ policy.create_pin('Version', pkgname, version, 1001)
+ pkg = cache[pkgname]
+ pkgver = policy.get_candidate_ver(pkg)
+ if not pkgver:
+ return None
+ if version_cmp == "=" and not fnmatch.fnmatch(pkgver.ver_str, version):
+ # Even though we put in a pin policy, it can be ignored if there is no
+ # possible candidate.
+ return None
+ return pkgver.ver_str
+
+
+def package_status(m, pkgname, version_cmp, version, default_release, cache, state):
+ """
+ :return: A tuple of (installed, installed_version, version_installable, has_files). *installed* indicates whether
+ the package (regardless of version) is installed. *installed_version* indicates whether the installed package
+ matches the provided version criteria. *version_installable* provides the latest matching version that can be
+ installed. In the case of virtual packages where we can't determine an applicable match, True is returned.
+ *has_files* indicates whether the package has files on the filesystem (even if not installed, meaning a purge is
+ required).
+ """
+ try:
+ # get the package from the cache, as well as the
+ # low-level apt_pkg.Package object which contains
+ # state fields not directly accessible from the
+ # higher-level apt.package.Package object.
+ pkg = cache[pkgname]
+ ll_pkg = cache._cache[pkgname] # the low-level package object
+ except KeyError:
+ if state == 'install':
+ try:
+ provided_packages = cache.get_providing_packages(pkgname)
+ if provided_packages:
+ # When this is a virtual package satisfied by only
+ # one installed package, return the status of the target
+ # package to avoid requesting re-install
+ if cache.is_virtual_package(pkgname) and len(provided_packages) == 1:
+ package = provided_packages[0]
+ installed, installed_version, version_installable, has_files = \
+ package_status(m, package.name, version_cmp, version, default_release, cache, state='install')
+ if installed:
+ return installed, installed_version, version_installable, has_files
+
+ # Otherwise return nothing so apt will sort out
+ # what package to satisfy this with
+ return False, False, True, False
+
+ m.fail_json(msg="No package matching '%s' is available" % pkgname)
+ except AttributeError:
+ # python-apt version too old to detect virtual packages
+ # mark as not installed and let apt-get install deal with it
+ return False, False, True, False
+ else:
+ return False, False, None, False
+ try:
+ has_files = len(pkg.installed_files) > 0
+ except UnicodeDecodeError:
+ has_files = True
+ except AttributeError:
+ has_files = False # older python-apt cannot be used to determine non-purged
+
+ try:
+ package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
+ except AttributeError: # python-apt 0.7.X has very weak low-level object
+ try:
+ # might not be necessary as python-apt post-0.7.X should have current_state property
+ package_is_installed = pkg.is_installed
+ except AttributeError:
+ # assume older version of python-apt is installed
+ package_is_installed = pkg.isInstalled
+
+ version_best = package_best_match(pkgname, version_cmp, version, default_release, cache._cache)
+ version_is_installed = False
+ version_installable = None
+ if package_is_installed:
+ try:
+ installed_version = pkg.installed.version
+ except AttributeError:
+ installed_version = pkg.installedVersion
+
+ if version_cmp == "=":
+ # check if the version is matched as well
+ version_is_installed = fnmatch.fnmatch(installed_version, version)
+ if version_best and installed_version != version_best and fnmatch.fnmatch(version_best, version):
+ version_installable = version_best
+ elif version_cmp == ">=":
+ version_is_installed = apt_pkg.version_compare(installed_version, version) >= 0
+ if version_best and installed_version != version_best and apt_pkg.version_compare(version_best, version) >= 0:
+ version_installable = version_best
+ else:
+ version_is_installed = True
+ if version_best and installed_version != version_best:
+ version_installable = version_best
+ else:
+ version_installable = version_best
+
+ return package_is_installed, version_is_installed, version_installable, has_files
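+ # Worked example against a hypothetical cache: nginx 1.18.0 installed with
+ # candidate 1.20.1 and no version requested returns
+ # (True, True, '1.20.1', True) - installed, criteria met, upgrade available.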
+
+
+def expand_dpkg_options(dpkg_options_compressed):
+ options_list = dpkg_options_compressed.split(',')
+ dpkg_options = ""
+ for dpkg_option in options_list:
+ dpkg_options = '%s -o "Dpkg::Options::=--%s"' \
+ % (dpkg_options, dpkg_option)
+ return dpkg_options.strip()
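+ # For example, assuming the module default of 'force-confdef,force-confold':
+ # expand_dpkg_options('force-confdef,force-confold')
+ # -> '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'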
+
+
+def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
+ # Note: apt-get does implicit regex matching when an exact package name
+ # match is not found. Something like this:
+ # matches = [pkg.name for pkg in cache if re.match(pkgspec, pkg.name)]
+ # (Should also deal with the ':' for multiarch like the fnmatch code below)
+ #
+ # We have decided not to do similar implicit regex matching but might take
+ # a PR to add some sort of explicit regex matching:
+ # https://github.com/ansible/ansible-modules-core/issues/1258
+ new_pkgspec = []
+ if pkgspec:
+ for pkgspec_pattern in pkgspec:
+
+ if not isinstance(pkgspec_pattern, string_types):
+ m.fail_json(msg="Invalid type for package name, expected string but got %s" % type(pkgspec_pattern))
+
+ pkgname_pattern, version_cmp, version = package_split(pkgspec_pattern)
+
+ # note that none of these chars is allowed in a (debian) pkgname
+ if frozenset('*?[]!').intersection(pkgname_pattern):
+ # handle multiarch pkgnames, the idea is that "apt*" should
+ # only select native packages. But "apt*:i386" should still work
+ if ":" not in pkgname_pattern:
+ # Filter the multiarch packages from the cache only once
+ try:
+ pkg_name_cache = _non_multiarch # pylint: disable=used-before-assignment
+ except NameError:
+ pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if ':' not in pkg.name] # noqa: F841
+ else:
+ # Create a cache of pkg_names including multiarch only once
+ try:
+ pkg_name_cache = _all_pkg_names # pylint: disable=used-before-assignment
+ except NameError:
+ pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] # noqa: F841
+
+ matches = fnmatch.filter(pkg_name_cache, pkgname_pattern)
+
+ if not matches:
+ m.fail_json(msg="No package(s) matching '%s' available" % to_text(pkgname_pattern))
+ else:
+ new_pkgspec.extend(matches)
+ else:
+ # No wildcards in name
+ new_pkgspec.append(pkgspec_pattern)
+ return new_pkgspec
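+ # Illustrative expansion (cache contents assumed): ['apt*'] fnmatches only
+ # native names (e.g. ['apt', 'apt-utils']), 'apt*:i386' is matched against
+ # all names including multiarch ones, and a plain 'nginx' passes through.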
+
+
+def parse_diff(output):
+ diff = to_native(output).splitlines()
+ try:
+ # check for start marker from aptitude
+ diff_start = diff.index('Resolving dependencies...')
+ except ValueError:
+ try:
+ # check for start marker from apt-get
+ diff_start = diff.index('Reading state information...')
+ except ValueError:
+ # show everything
+ diff_start = -1
+ try:
+ # check for end marker line from both apt-get and aptitude
+ diff_end = next(i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item))
+ except StopIteration:
+ diff_end = len(diff)
+ diff_start += 1
+ diff_end += 1
+ return {'prepared': '\n'.join(diff[diff_start:diff_end])}
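+ # e.g. for apt-get output, the returned dict is {'prepared': <the lines just
+ # after 'Reading state information...' through the 'N upgraded, ...' summary>}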
+
+
+def mark_installed_manually(m, packages):
+ if not packages:
+ return
+
+ apt_mark_cmd_path = m.get_bin_path("apt-mark")
+
+ # https://github.com/ansible/ansible/issues/40531
+ if apt_mark_cmd_path is None:
+ m.warn("Could not find apt-mark binary, not marking package(s) as manually installed.")
+ return
+
+ cmd = "%s manual %s" % (apt_mark_cmd_path, ' '.join(packages))
+ rc, out, err = m.run_command(cmd)
+
+ if APT_MARK_INVALID_OP in err or APT_MARK_INVALID_OP_DEB6 in err:
+ cmd = "%s unmarkauto %s" % (apt_mark_cmd_path, ' '.join(packages))
+ rc, out, err = m.run_command(cmd)
+
+ if rc != 0:
+ m.fail_json(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
+
+
+def install(m, pkgspec, cache, upgrade=False, default_release=None,
+ install_recommends=None, force=False,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
+ build_dep=False, fixed=False, autoremove=False, fail_on_autoremove=False, only_upgrade=False,
+ allow_unauthenticated=False, allow_downgrade=False, allow_change_held_packages=False):
+ pkg_list = []
+ packages = ""
+ pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
+ package_names = []
+ for package in pkgspec:
+ if build_dep:
+ # Let apt decide what to install
+ pkg_list.append("'%s'" % package)
+ continue
+
+ name, version_cmp, version = package_split(package)
+ package_names.append(name)
+ installed, installed_version, version_installable, has_files = package_status(m, name, version_cmp, version, default_release, cache, state='install')
+
+ if not installed and only_upgrade:
+ # only_upgrade upgrades packages that are already installed
+ # since this package is not installed, skip it
+ continue
+
+ if not installed_version and not version_installable:
+ status = False
+ data = dict(msg="no available installation candidate for %s" % package)
+ return (status, data)
+
+ if version_installable and ((not installed and not only_upgrade) or upgrade or not installed_version):
+ if version_installable is not True:
+ pkg_list.append("'%s=%s'" % (name, version_installable))
+ elif version:
+ pkg_list.append("'%s=%s'" % (name, version))
+ else:
+ pkg_list.append("'%s'" % name)
+ elif installed_version and version_installable and version_cmp == "=":
+ # This happens when the package is installed, a newer version is
+ # available, and the version is a wildcard that matches both
+ #
+ # This is legacy behavior, and isn't documented (in fact it does
+ # things the documentation says it shouldn't). It should not be relied
+ # upon.
+ pkg_list.append("'%s=%s'" % (name, version))
+ packages = ' '.join(pkg_list)
+
+ if packages:
+ if force:
+ force_yes = '--force-yes'
+ else:
+ force_yes = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ if autoremove:
+ autoremove = '--auto-remove'
+ else:
+ autoremove = ''
+
+ if fail_on_autoremove:
+ fail_on_autoremove = '--no-remove'
+ else:
+ fail_on_autoremove = ''
+
+ if only_upgrade:
+ only_upgrade = '--only-upgrade'
+ else:
+ only_upgrade = ''
+
+ if fixed:
+ fixed = '--fix-broken'
+ else:
+ fixed = ''
+
+ if build_dep:
+ cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, fail_on_autoremove, check_arg, packages)
+ else:
+ cmd = "%s -y %s %s %s %s %s %s %s install %s" % \
+ (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, fail_on_autoremove, check_arg, packages)
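+ # Illustrative result with hypothetical flags: the install branch yields
+ # something like
+ # apt-get -y -o "Dpkg::Options::=--force-confdef" --only-upgrade --simulate install 'nginx=1.18.1'
+ # (unset flags collapse to extra whitespace, which run_command's splitting ignores)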
+
+ if default_release:
+ cmd += " -t '%s'" % (default_release,)
+
+ if install_recommends is False:
+ cmd += " -o APT::Install-Recommends=no"
+ elif install_recommends is True:
+ cmd += " -o APT::Install-Recommends=yes"
+ # install_recommends is None uses the OS default
+
+ if allow_unauthenticated:
+ cmd += " --allow-unauthenticated"
+
+ if allow_downgrade:
+ cmd += " --allow-downgrades"
+
+ if allow_change_held_packages:
+ cmd += " --allow-change-held-packages"
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ status = True
+
+ changed = True
+ if build_dep:
+ changed = APT_GET_ZERO not in out
+
+ data = dict(changed=changed, stdout=out, stderr=err, diff=diff)
+ if rc:
+ status = False
+ data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
+ else:
+ status = True
+ data = dict(changed=False)
+
+ if not build_dep and not m.check_mode:
+ mark_installed_manually(m, package_names)
+
+ return (status, data)
+
+
+def get_field_of_deb(m, deb_file, field="Version"):
+ cmd_dpkg = m.get_bin_path("dpkg", True)
+ cmd = cmd_dpkg + " --field %s %s" % (deb_file, field)
+ rc, stdout, stderr = m.run_command(cmd)
+ if rc != 0:
+ m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
+ return to_native(stdout).strip('\n')
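+ # e.g. get_field_of_deb(m, '/tmp/nginx_1.18.1_amd64.deb', 'Package') runs
+ # `dpkg --field /tmp/nginx_1.18.1_amd64.deb Package` and returns 'nginx'
+ # (the path and value here are illustrative)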
+
+
+def install_deb(
+ m, debs, cache, force, fail_on_autoremove, install_recommends,
+ allow_unauthenticated,
+ allow_downgrade,
+ allow_change_held_packages,
+ dpkg_options,
+):
+ changed = False
+ deps_to_install = []
+ pkgs_to_install = []
+ for deb_file in debs.split(','):
+ try:
+ pkg = apt.debfile.DebPackage(deb_file, cache=apt.Cache())
+ pkg_name = get_field_of_deb(m, deb_file, "Package")
+ pkg_version = get_field_of_deb(m, deb_file, "Version")
+ if hasattr(apt_pkg, 'get_architectures') and len(apt_pkg.get_architectures()) > 1:
+ pkg_arch = get_field_of_deb(m, deb_file, "Architecture")
+ pkg_key = "%s:%s" % (pkg_name, pkg_arch)
+ else:
+ pkg_key = pkg_name
+ try:
+ installed_pkg = apt.Cache()[pkg_key]
+ installed_version = installed_pkg.installed.version
+ if package_version_compare(pkg_version, installed_version) == 0:
+ # Does not need to down-/upgrade, move on to next package
+ continue
+ except Exception:
+ # Must not be installed, continue with installation
+ pass
+ # Check if package is installable
+ if not pkg.check():
+ if force or ("later version" in pkg._failure_string and allow_downgrade):
+ pass
+ else:
+ m.fail_json(msg=pkg._failure_string)
+
+ # add any missing deps to the list of deps we need
+ # to install so they're all done in one shot
+ deps_to_install.extend(pkg.missing_deps)
+
+ except Exception as e:
+ m.fail_json(msg="Unable to install package: %s" % to_native(e))
+
+ # and add this deb to the list of packages to install
+ pkgs_to_install.append(deb_file)
+
+ # install the deps through apt
+ retvals = {}
+ if deps_to_install:
+ (success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
+ install_recommends=install_recommends,
+ fail_on_autoremove=fail_on_autoremove,
+ allow_unauthenticated=allow_unauthenticated,
+ allow_downgrade=allow_downgrade,
+ allow_change_held_packages=allow_change_held_packages,
+ dpkg_options=expand_dpkg_options(dpkg_options))
+ if not success:
+ m.fail_json(**retvals)
+ changed = retvals.get('changed', False)
+
+ if pkgs_to_install:
+ options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
+ if m.check_mode:
+ options += " --simulate"
+ if force:
+ options += " --force-all"
+
+ cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if "stdout" in retvals:
+ stdout = retvals["stdout"] + out
+ else:
+ stdout = out
+ if "diff" in retvals:
+ diff = retvals["diff"]
+ if 'prepared' in diff:
+ diff['prepared'] += '\n\n' + out
+ else:
+ diff = parse_diff(out)
+ if "stderr" in retvals:
+ stderr = retvals["stderr"] + err
+ else:
+ stderr = err
+
+ if rc == 0:
+ m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
+ else:
+ m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
+ else:
+ m.exit_json(changed=changed, stdout=retvals.get('stdout', ''), stderr=retvals.get('stderr', ''), diff=retvals.get('diff', ''))
+
+
+def remove(m, pkgspec, cache, purge=False, force=False,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False):
+ pkg_list = []
+ pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
+ for package in pkgspec:
+ name, version_cmp, version = package_split(package)
+ installed, installed_version, upgradable, has_files = package_status(m, name, version_cmp, version, None, cache, state='remove')
+ if installed_version or (has_files and purge):
+ pkg_list.append("'%s'" % package)
+ packages = ' '.join(pkg_list)
+
+ if not packages:
+ m.exit_json(changed=False)
+ else:
+ if force:
+ force_yes = '--force-yes'
+ else:
+ force_yes = ''
+
+ if purge:
+ purge = '--purge'
+ else:
+ purge = ''
+
+ if autoremove:
+ autoremove = '--auto-remove'
+ else:
+ autoremove = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ cmd = "%s -q -y %s %s %s %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, autoremove, check_arg, packages)
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ if rc:
+ m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err, rc=rc)
+ m.exit_json(changed=True, stdout=out, stderr=err, diff=diff)
+
+
+def cleanup(m, purge=False, force=False, operation=None,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
+
+ if operation not in frozenset(['autoremove', 'autoclean']):
+ raise AssertionError('Expected "autoremove" or "autoclean" cleanup operation, got %s' % operation)
+
+ if force:
+ force_yes = '--force-yes'
+ else:
+ force_yes = ''
+
+ if purge:
+ purge = '--purge'
+ else:
+ purge = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ cmd = "%s -y %s %s %s %s %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, operation, check_arg)
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ if rc:
+ m.fail_json(msg="'apt-get %s' failed: %s" % (operation, err), stdout=out, stderr=err, rc=rc)
+
+ changed = CLEAN_OP_CHANGED_STR[operation] in out
+
+ m.exit_json(changed=changed, stdout=out, stderr=err, diff=diff)
+
+
+def aptclean(m):
+ clean_rc, clean_out, clean_err = m.run_command(['apt-get', 'clean'])
+ if m._diff:
+ clean_diff = parse_diff(clean_out)
+ else:
+ clean_diff = {}
+ if clean_rc:
+ m.fail_json(msg="apt-get clean failed", stdout=clean_out, rc=clean_rc)
+ if clean_err:
+ m.fail_json(msg="apt-get clean failed: %s" % clean_err, stdout=clean_out, rc=clean_rc)
+ return clean_out, clean_err
+
+
+def upgrade(m, mode="yes", force=False, default_release=None,
+ use_apt_get=False,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False, fail_on_autoremove=False,
+ allow_unauthenticated=False,
+ allow_downgrade=False,
+ ):
+
+ if autoremove:
+ autoremove = '--auto-remove'
+ else:
+ autoremove = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ apt_cmd = None
+ prompt_regex = None
+ if mode == "dist" or (mode == "full" and use_apt_get):
+ # apt-get dist-upgrade
+ apt_cmd = APT_GET_CMD
+ upgrade_command = "dist-upgrade %s" % (autoremove)
+ elif mode == "full" and not use_apt_get:
+ # aptitude full-upgrade
+ apt_cmd = APTITUDE_CMD
+ upgrade_command = "full-upgrade"
+ else:
+ if use_apt_get:
+ apt_cmd = APT_GET_CMD
+ upgrade_command = "upgrade --with-new-pkgs %s" % (autoremove)
+ else:
+ # aptitude safe-upgrade # mode=yes # default
+ apt_cmd = APTITUDE_CMD
+ upgrade_command = "safe-upgrade"
+ prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
+
+ if force:
+ if apt_cmd == APT_GET_CMD:
+ force_yes = '--force-yes'
+ else:
+ force_yes = '--assume-yes --allow-untrusted'
+ else:
+ force_yes = ''
+
+ if fail_on_autoremove:
+ fail_on_autoremove = '--no-remove'
+ else:
+ fail_on_autoremove = ''
+
+ allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else ''
+
+ allow_downgrade = '--allow-downgrades' if allow_downgrade else ''
+
+ if apt_cmd is None:
+ if use_apt_get:
+ apt_cmd = APT_GET_CMD
+ else:
+ m.fail_json(msg="Unable to find APTITUDE in path. Please make sure "
+ "to have APTITUDE in path or use 'force_apt_get=True'")
+ apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
+
+ cmd = '%s -y %s %s %s %s %s %s %s' % (
+ apt_cmd_path,
+ dpkg_options,
+ force_yes,
+ fail_on_autoremove,
+ allow_unauthenticated,
+ allow_downgrade,
+ check_arg,
+ upgrade_command,
+ )
+
+ if default_release:
+ cmd += " -t '%s'" % (default_release,)
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ if rc:
+ m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out, rc=rc)
+ if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
+ m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
+ m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
+
+
+ def get_cache_mtime():
+ """Return the mtime of a valid apt cache file.
+ Stat the apt cache file; if no cache file is found, return 0.
+ :returns: ``int``
+ """
+ cache_time = 0
+ if os.path.exists(APT_UPDATE_SUCCESS_STAMP_PATH):
+ cache_time = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime
+ elif os.path.exists(APT_LISTS_PATH):
+ cache_time = os.stat(APT_LISTS_PATH).st_mtime
+ return cache_time
+
+
+ def get_updated_cache_time():
+ """Return the mtime timestamp and the updated cache time.
+ Always retrieve the mtime of the apt cache, falling back to 0 when no
+ cache file exists.
+ :returns: ``tuple``
+ """
+ cache_mtime = get_cache_mtime()
+ mtimestamp = datetime.datetime.fromtimestamp(cache_mtime)
+ updated_cache_time = int(time.mktime(mtimestamp.timetuple()))
+ return mtimestamp, updated_cache_time
+
+
+# https://github.com/ansible/ansible-modules-core/issues/2951
+def get_cache(module):
+ '''Attempt to get the cache object and update until it works'''
+ cache = None
+ try:
+ cache = apt.Cache()
+ except SystemError as e:
+ if '/var/lib/apt/lists/' in to_native(e).lower():
+ # update cache until files are fixed or retries exceeded
+ retries = 0
+ while retries < 2:
+ (rc, so, se) = module.run_command(['apt-get', 'update', '-q'])
+ retries += 1
+ if rc == 0:
+ break
+ if rc != 0:
+ module.fail_json(msg='Updating the cache to correct corrupt package lists failed:\n%s\n%s' % (to_native(e), so + se), rc=rc)
+ # try again
+ cache = apt.Cache()
+ else:
+ module.fail_json(msg=to_native(e))
+ return cache
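+ # Typical failure handled above (illustrative): a SystemError mentioning
+ # /var/lib/apt/lists/ triggers up to two 'apt-get update -q' runs before
+ # the cache is reopened; any other SystemError fails the module directly.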
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
+ update_cache=dict(type='bool', aliases=['update-cache']),
+ update_cache_retries=dict(type='int', default=5),
+ update_cache_retry_max_delay=dict(type='int', default=12),
+ cache_valid_time=dict(type='int', default=0),
+ purge=dict(type='bool', default=False),
+ package=dict(type='list', elements='str', aliases=['pkg', 'name']),
+ deb=dict(type='path'),
+ default_release=dict(type='str', aliases=['default-release']),
+ install_recommends=dict(type='bool', aliases=['install-recommends']),
+ force=dict(type='bool', default=False),
+ upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes'], default='no'),
+ dpkg_options=dict(type='str', default=DPKG_OPTIONS),
+ autoremove=dict(type='bool', default=False),
+ autoclean=dict(type='bool', default=False),
+ fail_on_autoremove=dict(type='bool', default=False),
+ policy_rc_d=dict(type='int', default=None),
+ only_upgrade=dict(type='bool', default=False),
+ force_apt_get=dict(type='bool', default=False),
+ clean=dict(type='bool', default=False),
+ allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
+ allow_downgrade=dict(type='bool', default=False, aliases=['allow-downgrade', 'allow_downgrades', 'allow-downgrades']),
+ allow_change_held_packages=dict(type='bool', default=False),
+ lock_timeout=dict(type='int', default=60),
+ ),
+ mutually_exclusive=[['deb', 'package', 'upgrade']],
+ required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
+ supports_check_mode=True,
+ )
+
+ # We screenscrape apt-get and aptitude output for information, so we need
+ # to make sure we use the best parsable locale when running commands.
+ # We also set APT-specific vars for the desired behaviour.
+ locale = get_best_parsable_locale(module)
+ # APT related constants
+ APT_ENV_VARS = dict(
+ DEBIAN_FRONTEND='noninteractive',
+ DEBIAN_PRIORITY='critical',
+ LANG=locale,
+ LC_ALL=locale,
+ LC_MESSAGES=locale,
+ LC_CTYPE=locale,
+ )
+ module.run_command_environ_update = APT_ENV_VARS
+
+ if not HAS_PYTHON_APT:
+ # This interpreter can't see the apt Python library- we'll do the following to try and fix that:
+ # 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
+ # 2) finding none, try to install a matching python-apt package for the current interpreter version;
+ # we limit to the current interpreter version to try and avoid installing a whole other Python just
+ # for apt support
+ # 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be
+ # the current interpreter again, but we'll let it respawn anyway for simplicity)
+ # 4) if still not working, return an error and give up (some corner cases not covered, but this shouldn't be
+ # made any more complex than it already is to try and cover more, eg, custom interpreters taking over
+ # system locations)
+
+ apt_pkg_name = 'python3-apt' if PY3 else 'python-apt'
+
+ if has_respawned():
+ # this shouldn't be possible; short-circuit early if it happens...
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
+
+ interpreters = ['/usr/bin/python3', '/usr/bin/python2', '/usr/bin/python']
+
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+
+ # don't make changes if we're in check_mode
+ if module.check_mode:
+ module.fail_json(msg="%s must be installed to use check mode. "
+ "If run normally this module can auto-install it." % apt_pkg_name)
+
+ # We skip the cache update when auto-installing the dependency if the
+ # user explicitly declared it with update_cache=no.
+ if module.params.get('update_cache') is False:
+ module.warn("Auto-installing missing dependency without updating cache: %s" % apt_pkg_name)
+ else:
+ module.warn("Updating cache and auto-installing missing dependency: %s" % apt_pkg_name)
+ module.run_command(['apt-get', 'update'], check_rc=True)
+
+ # try to install the apt Python binding
+ module.run_command(['apt-get', 'install', '--no-install-recommends', apt_pkg_name, '-y', '-q'], check_rc=True)
+
+ # try again to find the bindings in common places
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ # NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+ else:
+ # we've done all we can do; just tell the user it's busted and get out
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
+
+ global APTITUDE_CMD
+ APTITUDE_CMD = module.get_bin_path("aptitude", False)
+ global APT_GET_CMD
+ APT_GET_CMD = module.get_bin_path("apt-get")
+
+ p = module.params
+
+ if p['clean'] is True:
+ aptclean_stdout, aptclean_stderr = aptclean(module)
+ # If there is nothing else to do, exit. A clean run is always
+ # reported as changed.
+ if not p['package'] and not p['upgrade'] and not p['deb']:
+ module.exit_json(
+ changed=True,
+ msg=aptclean_stdout,
+ stdout=aptclean_stdout,
+ stderr=aptclean_stderr
+ )
+
+ if p['upgrade'] == 'no':
+ p['upgrade'] = None
+
+ use_apt_get = p['force_apt_get']
+
+ if not use_apt_get and not APTITUDE_CMD:
+ use_apt_get = True
+
+ updated_cache = False
+ updated_cache_time = 0
+ install_recommends = p['install_recommends']
+ allow_unauthenticated = p['allow_unauthenticated']
+ allow_downgrade = p['allow_downgrade']
+ allow_change_held_packages = p['allow_change_held_packages']
+ dpkg_options = expand_dpkg_options(p['dpkg_options'])
+ autoremove = p['autoremove']
+ fail_on_autoremove = p['fail_on_autoremove']
+ autoclean = p['autoclean']
+
+ # absolute deadline for retrying while apt is locked
+ deadline = time.time() + p['lock_timeout']
+
+ # keep running on lock issues unless timeout or resolution is hit.
+ while True:
+
+ # Get the cache object; get_cache() retries the update internally on corrupt lists
+ cache = get_cache(module)
+
+ try:
+ if p['default_release']:
+ try:
+ apt_pkg.config['APT::Default-Release'] = p['default_release']
+ except AttributeError:
+ apt_pkg.Config['APT::Default-Release'] = p['default_release']
+ # reopen cache w/ modified config
+ cache.open(progress=None)
+
+ mtimestamp, updated_cache_time = get_updated_cache_time()
+ # cache_valid_time defaults to 0, so with update_cache=true the
+ # cache is refreshed whenever it is older than cache_valid_time
+ updated_cache = False
+ if p['update_cache'] or p['cache_valid_time']:
+ now = datetime.datetime.now()
+ tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
+ if not mtimestamp + tdelta >= now:
+ # Retry to update the cache with exponential backoff
+ err = ''
+ update_cache_retries = module.params.get('update_cache_retries')
+ update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
+ randomize = random.randint(0, 1000) / 1000.0
+
+ for retry in range(update_cache_retries):
+ try:
+ if not module.check_mode:
+ cache.update()
+ break
+ except apt.cache.FetchFailedException as e:
+ err = to_native(e)
+
+ # Use exponential backoff plus a little bit of randomness
+ delay = 2 ** retry + randomize
+ if delay > update_cache_retry_max_delay:
+ delay = update_cache_retry_max_delay + randomize
+ time.sleep(delay)
+ else:
+ module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
+
+ cache.open(progress=None)
+ mtimestamp, post_cache_update_time = get_updated_cache_time()
+ if module.check_mode or updated_cache_time != post_cache_update_time:
+ updated_cache = True
+ updated_cache_time = post_cache_update_time
+
+ # If there is nothing else to do exit. This will set state as
+ # changed based on if the cache was updated.
+ if not p['package'] and not p['upgrade'] and not p['deb']:
+ module.exit_json(
+ changed=updated_cache,
+ cache_updated=updated_cache,
+ cache_update_time=updated_cache_time
+ )
+
+ force_yes = p['force']
+
+ if p['upgrade']:
+ upgrade(
+ module,
+ p['upgrade'],
+ force_yes,
+ p['default_release'],
+ use_apt_get,
+ dpkg_options,
+ autoremove,
+ fail_on_autoremove,
+ allow_unauthenticated,
+ allow_downgrade
+ )
+
+ if p['deb']:
+ if p['state'] != 'present':
+ module.fail_json(msg="deb only supports state=present")
+ if '://' in p['deb']:
+ p['deb'] = fetch_file(module, p['deb'])
+ install_deb(module, p['deb'], cache,
+ install_recommends=install_recommends,
+ allow_unauthenticated=allow_unauthenticated,
+ allow_change_held_packages=allow_change_held_packages,
+ allow_downgrade=allow_downgrade,
+ force=force_yes, fail_on_autoremove=fail_on_autoremove, dpkg_options=p['dpkg_options'])
+
+ unfiltered_packages = p['package'] or ()
+ packages = [package.strip() for package in unfiltered_packages if package != '*']
+ all_installed = '*' in unfiltered_packages
+ latest = p['state'] == 'latest'
+
+ if latest and all_installed:
+ if packages:
+ module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
+ upgrade(
+ module,
+ 'yes',
+ force_yes,
+ p['default_release'],
+ use_apt_get,
+ dpkg_options,
+ autoremove,
+ fail_on_autoremove,
+ allow_unauthenticated,
+ allow_downgrade
+ )
+
+ if packages:
+ for package in packages:
+ if package.count('=') > 1:
+ module.fail_json(msg="invalid package spec: %s" % package)
+
+ if not packages:
+ if autoclean:
+ cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
+ if autoremove:
+ cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
+
+ if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
+ state_upgrade = False
+ state_builddep = False
+ state_fixed = False
+ if p['state'] == 'latest':
+ state_upgrade = True
+ if p['state'] == 'build-dep':
+ state_builddep = True
+ if p['state'] == 'fixed':
+ state_fixed = True
+
+ success, retvals = install(
+ module,
+ packages,
+ cache,
+ upgrade=state_upgrade,
+ default_release=p['default_release'],
+ install_recommends=install_recommends,
+ force=force_yes,
+ dpkg_options=dpkg_options,
+ build_dep=state_builddep,
+ fixed=state_fixed,
+ autoremove=autoremove,
+ fail_on_autoremove=fail_on_autoremove,
+ only_upgrade=p['only_upgrade'],
+ allow_unauthenticated=allow_unauthenticated,
+ allow_downgrade=allow_downgrade,
+ allow_change_held_packages=allow_change_held_packages,
+ )
+
+ # Store if the cache has been updated
+ retvals['cache_updated'] = updated_cache
+ # Store when the update time was last
+ retvals['cache_update_time'] = updated_cache_time
+
+ if success:
+ module.exit_json(**retvals)
+ else:
+ module.fail_json(**retvals)
+ elif p['state'] == 'absent':
+ remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
+
+ except apt.cache.LockFailedException as lockFailedException:
+ if time.time() < deadline:
+ continue
+ module.fail_json(msg="Failed to lock apt for exclusive operation: %s" % lockFailedException)
+ except apt.cache.FetchFailedException as fetchFailedException:
+ module.fail_json(msg="Could not fetch updated apt files: %s" % fetchFailedException)
+
+ # got here without an exception or an explicit exit - should be unreachable
+ module.fail_json(msg='Unexpected code path taken, we really should have exited before, this is a bug')
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/apt_key.py b/lib/ansible/modules/apt_key.py
new file mode 100644
index 0000000..67caf6d
--- /dev/null
+++ b/lib/ansible/modules/apt_key.py
@@ -0,0 +1,530 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2012, Jayson Vantuyl <jayson@aggressive.ly>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apt_key
+author:
+- Jayson Vantuyl (@jvantuyl)
+version_added: "1.0"
+short_description: Add or remove an apt key
+description:
+ - Add or remove an I(apt) key, optionally downloading it.
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: debian
+notes:
+ - The apt-key command has been deprecated; upstream suggests managing keyring files in trusted.gpg.d instead. See the Debian wiki for details.
+ This module is kept for backwards compatibility for systems that still use apt-key as the main way to manage apt repository keys.
+ - As a sanity check, the downloaded key id must match the one specified.
+ - "Use full fingerprint (40 characters) key ids to avoid key collisions.
+ To generate a full-fingerprint imported key: C(apt-key adv --list-public-keys --with-fingerprint --with-colons)."
+ - If you specify both the key id and the URL with C(state=present), the task can verify or add the key as needed.
+ - Adding a new key requires an apt cache update (e.g. using the M(ansible.builtin.apt) module's update_cache option).
+requirements:
+ - gpg
+options:
+ id:
+ description:
+ - The identifier of the key.
+ - Including this allows check mode to correctly report the changed state.
+ - If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead.
+ - This parameter is required when C(state) is set to C(absent).
+ type: str
+ data:
+ description:
+ - The keyfile contents to add to the keyring.
+ type: str
+ file:
+ description:
+ - The path to a keyfile on the remote server to add to the keyring.
+ type: path
+ keyring:
+ description:
+ - The full path to specific keyring file in C(/etc/apt/trusted.gpg.d/).
+ type: path
+ version_added: "1.3"
+ url:
+ description:
+ - The URL to retrieve key from.
+ type: str
+ keyserver:
+ description:
+ - The keyserver to retrieve key from.
+ type: str
+ version_added: "1.6"
+ state:
+ description:
+ - Ensures that the key is present (added) or absent (revoked).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ validate_certs:
+ description:
+ - If C(false), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: One way to avoid apt_key once it is removed from your distro
+ block:
+ - name: somerepo | no apt key
+ ansible.builtin.get_url:
+ url: https://download.example.com/linux/ubuntu/gpg
+ dest: /etc/apt/trusted.gpg.d/somerepo.asc
+
+ - name: somerepo | apt source
+ ansible.builtin.apt_repository:
+ repo: "deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/myrepo.asc] https://download.example.com/linux/ubuntu {{ ansible_distribution_release }} stable"
+ state: present
+
+- name: Add an apt key by id from a keyserver
+ ansible.builtin.apt_key:
+ keyserver: keyserver.ubuntu.com
+ id: 36A1D7869245C8950F966E92D8576A8BA88D21E9
+
+- name: Add an Apt signing key, uses whichever key is at the URL
+ ansible.builtin.apt_key:
+ url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
+ state: present
+
+- name: Add an Apt signing key, will not download if present
+ ansible.builtin.apt_key:
+ id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
+ state: present
+
+ - name: Remove an Apt specific signing key, leading 0x is valid
+ ansible.builtin.apt_key:
+ id: 0x9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ state: absent
+
+# Use armored file since utf-8 string is expected. Must be of "PGP PUBLIC KEY BLOCK" type.
+- name: Add a key from a file on the Ansible server
+ ansible.builtin.apt_key:
+ data: "{{ lookup('ansible.builtin.file', 'apt.asc') }}"
+ state: present
+
+- name: Add an Apt signing key to a specific keyring file
+ ansible.builtin.apt_key:
+ id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
+ keyring: /etc/apt/trusted.gpg.d/debian.gpg
+
+- name: Add Apt signing key on remote server to keyring
+ ansible.builtin.apt_key:
+ id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ file: /tmp/apt.gpg
+ state: present
+'''
+
+RETURN = '''
+after:
+ description: List of apt key ids or fingerprints after any modification
+ returned: on change
+ type: list
+ sample: ["D8576A8BA88D21E9", "3B4FE6ACC0B21F32", "D94AA3F0EFE21092", "871920D1991BC93C"]
+before:
+ description: List of apt key ids or fingerprints before any modifications
+ returned: always
+ type: list
+ sample: ["3B4FE6ACC0B21F32", "D94AA3F0EFE21092", "871920D1991BC93C"]
+fp:
+ description: Fingerprint of the key to import
+ returned: always
+ type: str
+ sample: "D8576A8BA88D21E9"
+id:
+ description: key id from source
+ returned: always
+ type: str
+ sample: "36A1D7869245C8950F966E92D8576A8BA88D21E9"
+key_id:
+ description: calculated key id; it should be the same as 'id', but can differ
+ returned: always
+ type: str
+ sample: "36A1D7869245C8950F966E92D8576A8BA88D21E9"
+short_id:
+ description: calculated short key id
+ returned: always
+ type: str
+ sample: "A88D21E9"
+'''
+
+import os
+
+# FIXME: standardize into module_common
+from traceback import format_exc
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.urls import fetch_url
+
+
+apt_key_bin = None
+gpg_bin = None
+locale = None
+
+
+def lang_env(module):
+
+ if not hasattr(lang_env, 'result'):
+ locale = get_best_parsable_locale(module)
+ lang_env.result = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+
+ return lang_env.result
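+ # The computed dict is memoized on the function object itself, so the
+ # locale probe runs at most once per module invocation.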
+
+
+def find_needed_binaries(module):
+ global apt_key_bin
+ global gpg_bin
+ apt_key_bin = module.get_bin_path('apt-key', required=True)
+ gpg_bin = module.get_bin_path('gpg', required=True)
+
+
+def add_http_proxy(cmd):
+
+ for envvar in ('HTTPS_PROXY', 'https_proxy', 'HTTP_PROXY', 'http_proxy'):
+ proxy = os.environ.get(envvar)
+ if proxy:
+ break
+
+ if proxy:
+ cmd += ' --keyserver-options http-proxy=%s' % proxy
+
+ return cmd
+
+
+def parse_key_id(key_id):
+ """validate the key_id and break it into segments
+
+ :arg key_id: The key_id as supplied by the user. A valid key_id will be
+ 8, 16, or more hexadecimal chars with an optional leading ``0x``.
+ :returns: The portion of key_id suitable for apt-key del, the portion
+ suitable for comparisons with --list-public-keys, and the portion that
+ can be used with --recv-key. If key_id is long enough, these will be
+ the last 8 characters of key_id, the last 16 characters, and all of
+ key_id. If key_id is not long enough, some of the values will be the
+ same.
+
+ * apt-key del <= 1.10 has a bug with key_id != 8 chars
+ * apt-key adv --list-public-keys prints 16 chars
+ * apt-key adv --recv-key can take more chars
+
+ """
+ # Make sure the key_id is valid hexadecimal
+ int(to_native(key_id), 16)
+
+ key_id = key_id.upper()
+ if key_id.startswith('0X'):
+ key_id = key_id[2:]
+
+ key_id_len = len(key_id)
+ if key_id_len not in (8, 16) and key_id_len <= 16:
+ raise ValueError('key_id must be 8, 16, or 16+ hexadecimal characters in length')
+
+ short_key_id = key_id[-8:]
+
+ fingerprint = key_id
+ if key_id_len > 16:
+ fingerprint = key_id[-16:]
+
+ return short_key_id, fingerprint, key_id
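+ # Worked example, reusing the fingerprint from EXAMPLES:
+ # parse_key_id('0x9FED2BCBDCD29CDF762678CBAED4B06F473041FA')
+ # -> ('473041FA', 'AED4B06F473041FA', '9FED2BCBDCD29CDF762678CBAED4B06F473041FA')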
+
+
+def parse_output_for_keys(output, short_format=False):
+
+ found = []
+ lines = to_native(output).split('\n')
+ for line in lines:
+ if (line.startswith("pub") or line.startswith("sub")) and "expired" not in line:
+ try:
+ # apt key format
+ tokens = line.split()
+ code = tokens[1]
+ (len_type, real_code) = code.split("/")
+ except (IndexError, ValueError):
+ # gpg format
+ try:
+ tokens = line.split(':')
+ real_code = tokens[4]
+ except (IndexError, ValueError):
+ # invalid line, skip
+ continue
+ found.append(real_code)
+
+ if found and short_format:
+ found = shorten_key_ids(found)
+
+ return found
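+ # The two accepted line shapes (values illustrative):
+ # apt-key: 'pub rsa4096/AED4B06F473041FA 2012-05-08 [SC]' -> 'AED4B06F473041FA'
+ # gpg --with-colons: 'pub:-:4096:1:AED4B06F473041FA:...' -> field 4, same id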
+
+
+def all_keys(module, keyring, short_format):
+ if keyring is not None:
+ cmd = "%s --keyring %s adv --list-public-keys --keyid-format=long" % (apt_key_bin, keyring)
+ else:
+ cmd = "%s adv --list-public-keys --keyid-format=long" % apt_key_bin
+ (rc, out, err) = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Unable to list public keys", cmd=cmd, rc=rc, stdout=out, stderr=err)
+
+ return parse_output_for_keys(out, short_format)
+
+
+def shorten_key_ids(key_id_list):
+ """
+ Takes a list of key ids, and converts them to the 'short' format,
+ by reducing them to their last 8 characters.
+ """
+ short = []
+ for key in key_id_list:
+ short.append(key[-8:])
+ return short
+
+
+def download_key(module, url):
+
+ try:
+ # note: validate_certs and other args are pulled from module directly
+ rsp, info = fetch_url(module, url, use_proxy=True)
+ if info['status'] != 200:
+ module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))
+
+ return rsp.read()
+ except Exception:
+ module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
+
+
+def get_key_id_from_file(module, filename, data=None):
+
+ native_data = to_native(data)
+ is_armored = native_data.find("-----BEGIN PGP PUBLIC KEY BLOCK-----") >= 0
+
+ key = None
+
+ cmd = [gpg_bin, '--with-colons', filename]
+
+ (rc, out, err) = module.run_command(cmd, environ_update=lang_env(module), data=(native_data if is_armored else data), binary_data=not is_armored)
+ if rc != 0:
+ module.fail_json(msg="Unable to extract key from '%s'" % ('inline data' if data is not None else filename), stdout=out, stderr=err)
+
+ keys = parse_output_for_keys(out)
+ # assume we only want first key?
+ if keys:
+ key = keys[0]
+
+ return key
+
+
+def get_key_id_from_data(module, data):
+ return get_key_id_from_file(module, '-', data)
+
+
+def import_key(module, keyring, keyserver, key_id):
+
+ if keyring:
+ cmd = "%s --keyring %s adv --no-tty --keyserver %s" % (apt_key_bin, keyring, keyserver)
+ else:
+ cmd = "%s adv --no-tty --keyserver %s" % (apt_key_bin, keyserver)
+
+ # check for proxy
+ cmd = add_http_proxy(cmd)
+
+ # add recv argument as last one
+ cmd = "%s --recv %s" % (cmd, key_id)
+
+ for retry in range(5):
+ (rc, out, err) = module.run_command(cmd, environ_update=lang_env(module))
+ if rc == 0:
+ break
+ else:
+ # Out of retries
+ if rc == 2 and 'not found on keyserver' in out:
+ msg = 'Key %s not found on keyserver %s' % (key_id, keyserver)
+ module.fail_json(cmd=cmd, msg=msg, forced_environment=lang_env(module))
+ else:
+ msg = "Error fetching key %s from keyserver: %s" % (key_id, keyserver)
+ module.fail_json(cmd=cmd, msg=msg, forced_environment=lang_env(module), rc=rc, stdout=out, stderr=err)
+ return True
+
+
+def add_key(module, keyfile, keyring, data=None):
+ if data is not None:
+ if keyring:
+ cmd = "%s --keyring %s add -" % (apt_key_bin, keyring)
+ else:
+ cmd = "%s add -" % apt_key_bin
+ (rc, out, err) = module.run_command(cmd, data=data, binary_data=True)
+ if rc != 0:
+ module.fail_json(
+ msg="Unable to add a key from binary data",
+ cmd=cmd,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ )
+ else:
+ if keyring:
+ cmd = "%s --keyring %s add %s" % (apt_key_bin, keyring, keyfile)
+ else:
+ cmd = "%s add %s" % (apt_key_bin, keyfile)
+ (rc, out, err) = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(
+ msg="Unable to add a key from file %s" % (keyfile),
+ cmd=cmd,
+ rc=rc,
+ keyfile=keyfile,
+ stdout=out,
+ stderr=err,
+ )
+ return True
+
+
+def remove_key(module, key_id, keyring):
+ if keyring:
+ cmd = '%s --keyring %s del %s' % (apt_key_bin, keyring, key_id)
+ else:
+ cmd = '%s del %s' % (apt_key_bin, key_id)
+ (rc, out, err) = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(
+ msg="Unable to remove a key with id %s" % (key_id),
+ cmd=cmd,
+ rc=rc,
+ key_id=key_id,
+ stdout=out,
+ stderr=err,
+ )
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='str'),
+ url=dict(type='str'),
+ data=dict(type='str'),
+ file=dict(type='path'),
+ keyring=dict(type='path'),
+ validate_certs=dict(type='bool', default=True),
+ keyserver=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(('data', 'file', 'keyserver', 'url'),),
+ )
+
+ # parameters
+ key_id = module.params['id']
+ url = module.params['url']
+ data = module.params['data']
+ filename = module.params['file']
+ keyring = module.params['keyring']
+ state = module.params['state']
+ keyserver = module.params['keyserver']
+
+ # internal vars
+ short_format = False
+ short_key_id = None
+ fingerprint = None
+ error_no_error = "apt-key did not return an error, but %s (check that the id is correct and *not* a subkey)"
+
+ # ensure we have requirements met
+ find_needed_binaries(module)
+
+ # initialize result dict
+ r = {'changed': False}
+
+ if not key_id:
+
+ if keyserver:
+ module.fail_json(msg="Missing key_id, required with keyserver.")
+
+ if url:
+ data = download_key(module, url)
+
+ if filename:
+ key_id = get_key_id_from_file(module, filename)
+ elif data:
+ key_id = get_key_id_from_data(module, data)
+
+ r['id'] = key_id
+ try:
+ short_key_id, fingerprint, key_id = parse_key_id(key_id)
+ r['short_id'] = short_key_id
+ r['fp'] = fingerprint
+ r['key_id'] = key_id
+ except ValueError:
+ module.fail_json(msg='Invalid key_id', **r)
+
+ if not fingerprint:
+ # an invalid key should fail well before this point, but just in case ...
+ module.fail_json(msg="Unable to continue as we could not extract a valid fingerprint to compare against existing keys.", **r)
+
+ if len(key_id) == 8:
+ short_format = True
+
+ # get existing keys to verify if we need to change
+ r['before'] = keys = all_keys(module, keyring, short_format)
+ keys2 = []
+
+ if state == 'present':
+ if (short_format and short_key_id not in keys) or (not short_format and fingerprint not in keys):
+ r['changed'] = True
+ if not module.check_mode:
+ if filename:
+ add_key(module, filename, keyring)
+ elif keyserver:
+ import_key(module, keyring, keyserver, key_id)
+ elif data:
+ # this also takes care of url if key_id was not provided
+ add_key(module, "-", keyring, data)
+ elif url:
+ # we hit this branch only if key_id is supplied with url
+ data = download_key(module, url)
+ add_key(module, "-", keyring, data)
+ else:
+ module.fail_json(msg="No key to add ... how did i get here?!?!", **r)
+
+ # verify it got added
+ r['after'] = keys2 = all_keys(module, keyring, short_format)
+ if (short_format and short_key_id not in keys2) or (not short_format and fingerprint not in keys2):
+ module.fail_json(msg=error_no_error % 'failed to add the key', **r)
+
+ elif state == 'absent':
+ if not key_id:
+ module.fail_json(msg="key is required to remove a key", **r)
+ if fingerprint in keys:
+ r['changed'] = True
+ if not module.check_mode:
+ # we use the "short" id: key_id[-8:], short_format=True
+ # it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
+ if short_key_id is not None and remove_key(module, short_key_id, keyring):
+ r['after'] = keys2 = all_keys(module, keyring, short_format)
+ if fingerprint in keys2:
+ module.fail_json(msg=error_no_error % 'the key was not removed', **r)
+ else:
+ module.fail_json(msg="error removing key_id", **r)
+
+ module.exit_json(**r)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/apt_repository.py b/lib/ansible/modules/apt_repository.py
new file mode 100644
index 0000000..f9a0cd9
--- /dev/null
+++ b/lib/ansible/modules/apt_repository.py
@@ -0,0 +1,735 @@
+# encoding: utf-8
+
+# Copyright: (c) 2012, Matt Wright <matt@nobien.net>
+# Copyright: (c) 2013, Alexander Saltanov <asd@mokote.com>
+# Copyright: (c) 2014, Rutger Spiertz <rutger@kumina.nl>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apt_repository
+short_description: Add and remove APT repositories
+description:
+ - Add or remove an APT repository in Ubuntu and Debian.
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: debian
+notes:
+ - This module supports Debian Squeeze (version 6) as well as its successors and derivatives.
+options:
+ repo:
+ description:
+ - A source string for the repository.
+ type: str
+ required: true
+ state:
+ description:
+ - The state of the source string.
+ type: str
+ choices: [ absent, present ]
+ default: "present"
+ mode:
+ description:
+ - The octal mode for newly created files in sources.list.d.
+ - Default is what the system uses (usually 0644).
+ type: raw
+ version_added: "1.6"
+ update_cache:
+ description:
+ - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
+ type: bool
+ default: "yes"
+ aliases: [ update-cache ]
+ update_cache_retries:
+ description:
+ - Number of retries if the cache update fails. Also see I(update_cache_retry_max_delay).
+ type: int
+ default: 5
+ version_added: '2.10'
+ update_cache_retry_max_delay:
+ description:
+ - Use an exponential backoff delay for each retry (see I(update_cache_retries)) up to this max delay in seconds.
+ type: int
+ default: 12
+ version_added: '2.10'
+ validate_certs:
+ description:
+ - If C(false), SSL certificates for the target repo will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ version_added: '1.8'
+ filename:
+ description:
+ - Sets the name of the source list file in sources.list.d.
+ Defaults to a file name based on the repository source url.
+ The .list extension will be automatically added.
+ type: str
+ version_added: '2.1'
+ codename:
+ description:
+ - Override the distribution codename to use for PPA repositories.
+ Should usually only be set when working with a PPA on
+ a non-Ubuntu target (for example, Debian or Mint).
+ type: str
+ version_added: '2.3'
+ install_python_apt:
+ description:
+ - Whether to automatically try to install the Python apt library or not, if it is not already installed.
+ Without this library, the module does not work.
+ - Runs C(apt-get install python-apt) for Python 2, and C(apt-get install python3-apt) for Python 3.
+ - Only works with the system Python 2 or Python 3. If you are using a Python on the remote that is not
+ the system Python, set I(install_python_apt=false) and ensure that the Python apt library
+ for your Python version is installed some other way.
+ type: bool
+ default: true
+author:
+- Alexander Saltanov (@sashka)
+version_added: "0.7"
+requirements:
+ - python-apt (python 2)
+ - python3-apt (python 3)
+ - apt-key or gpg
+'''
+
+EXAMPLES = '''
+- name: Add specified repository into sources list
+ ansible.builtin.apt_repository:
+ repo: deb http://archive.canonical.com/ubuntu hardy partner
+ state: present
+
+- name: Add specified repository into sources list using specified filename
+ ansible.builtin.apt_repository:
+ repo: deb http://dl.google.com/linux/chrome/deb/ stable main
+ state: present
+ filename: google-chrome
+
+- name: Add source repository into sources list
+ ansible.builtin.apt_repository:
+ repo: deb-src http://archive.canonical.com/ubuntu hardy partner
+ state: present
+
+- name: Remove specified repository from sources list
+ ansible.builtin.apt_repository:
+ repo: deb http://archive.canonical.com/ubuntu hardy partner
+ state: absent
+
+- name: Add nginx stable repository from PPA and install its signing key on Ubuntu target
+ ansible.builtin.apt_repository:
+ repo: ppa:nginx/stable
+
+- name: Add nginx stable repository from PPA and install its signing key on Debian target
+ ansible.builtin.apt_repository:
+ repo: 'ppa:nginx/stable'
+ codename: trusty
+
+- name: One way to avoid apt_key once it is removed from your distro
+ block:
+ - name: somerepo | no apt key
+ ansible.builtin.get_url:
+ url: https://download.example.com/linux/ubuntu/gpg
+ dest: /etc/apt/trusted.gpg.d/somerepo.asc
+
+ - name: somerepo | apt source
+ ansible.builtin.apt_repository:
+ repo: "deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/myrepo.asc] https://download.example.com/linux/ubuntu {{ ansible_distribution_release }} stable"
+ state: present
+'''
+
+RETURN = '''#'''
+
+import copy
+import glob
+import json
+import os
+import re
+import sys
+import tempfile
+import random
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import PY3
+from ansible.module_utils.urls import fetch_url
+
+try:
+ import apt
+ import apt_pkg
+ import aptsources.distro as aptsources_distro
+
+ distro = aptsources_distro.get_distro()
+
+ HAVE_PYTHON_APT = True
+except ImportError:
+ apt = apt_pkg = aptsources_distro = distro = None
+
+ HAVE_PYTHON_APT = False
+
+APT_KEY_DIRS = ['/etc/apt/keyrings', '/etc/apt/trusted.gpg.d', '/usr/share/keyrings']
+DEFAULT_SOURCES_PERM = 0o0644
+VALID_SOURCE_TYPES = ('deb', 'deb-src')
+
+
+def install_python_apt(module, apt_pkg_name):
+
+ if not module.check_mode:
+ apt_get_path = module.get_bin_path('apt-get')
+ if apt_get_path:
+ rc, so, se = module.run_command([apt_get_path, 'update'])
+ if rc != 0:
+ module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (apt_pkg_name, se.strip()))
+ rc, so, se = module.run_command([apt_get_path, 'install', apt_pkg_name, '-y', '-q'])
+ if rc != 0:
+ module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (apt_pkg_name, se.strip()))
+ else:
+ module.fail_json(msg="%s must be installed to use check mode" % apt_pkg_name)
+
+
+class InvalidSource(Exception):
+ pass
+
+
+# Simple version of aptsources.sourceslist.SourcesList.
+# No advanced logic and no backups inside.
+class SourcesList(object):
+ def __init__(self, module):
+ self.module = module
+ self.files = {} # group sources by file
+ # Repositories that we're adding -- used to implement mode param
+ self.new_repos = set()
+ self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist')
+
+ # read sources.list if it exists
+ if os.path.isfile(self.default_file):
+ self.load(self.default_file)
+
+ # read sources.list.d
+ for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')):
+ self.load(file)
+
+ def __iter__(self):
+ '''Simple iterator to go over all sources. Empty, non-source, and otherwise invalid lines will be skipped.'''
+ for file, sources in self.files.items():
+ for n, valid, enabled, source, comment in sources:
+ if valid:
+ yield file, n, enabled, source, comment
+
+ def _expand_path(self, filename):
+ if '/' in filename:
+ return filename
+ else:
+ return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename))
+
+ def _suggest_filename(self, line):
+ def _cleanup_filename(s):
+ filename = self.module.params['filename']
+ if filename is not None:
+ return filename
+ return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split())
+
+ def _strip_username_password(s):
+ if '@' in s:
+ s = s.split('@', 1)
+ s = s[-1]
+ return s
+
+ # Drop options and protocols.
+ line = re.sub(r'\[[^\]]+\]', '', line)
+ line = re.sub(r'\w+://', '', line)
+
+ # split line into valid keywords
+ parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]
+
+ # Drop usernames and passwords
+ parts[0] = _strip_username_password(parts[0])
+
+ return '%s.list' % _cleanup_filename(' '.join(parts[:1]))
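+ # e.g., with no explicit filename param (URL illustrative):
+ # _suggest_filename('deb http://dl.google.com/linux/chrome/deb/ stable main')
+ # -> 'dl_google_com_linux_chrome_deb.list'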
+
+ def _parse(self, line, raise_if_invalid_or_disabled=False):
+ valid = False
+ enabled = True
+ source = ''
+ comment = ''
+
+ line = line.strip()
+ if line.startswith('#'):
+ enabled = False
+ line = line[1:]
+
+ # Check for another "#" in the line and treat a part after it as a comment.
+ i = line.find('#')
+ if i > 0:
+ comment = line[i + 1:].strip()
+ line = line[:i]
+
+        # Split the source into chunks to make sure that it is a valid source spec.
+        # Duplicated whitespace in a valid source spec is collapsed.
+ source = line.strip()
+ if source:
+ chunks = source.split()
+ if chunks[0] in VALID_SOURCE_TYPES:
+ valid = True
+ source = ' '.join(chunks)
+
+ if raise_if_invalid_or_disabled and (not valid or not enabled):
+ raise InvalidSource(line)
+
+ return valid, enabled, source, comment
+
+ @staticmethod
+ def _apt_cfg_file(filespec):
+        '''
+        Wrapper around the `apt_pkg` module that also supports the old
+        (Python 2.5 era) CamelCase configuration API.
+        '''
+ try:
+ result = apt_pkg.config.find_file(filespec)
+ except AttributeError:
+ result = apt_pkg.Config.FindFile(filespec)
+ return result
+
+ @staticmethod
+ def _apt_cfg_dir(dirspec):
+        '''
+        Wrapper around the `apt_pkg` module that also supports the old
+        (Python 2.5 era) CamelCase configuration API.
+        '''
+ try:
+ result = apt_pkg.config.find_dir(dirspec)
+ except AttributeError:
+ result = apt_pkg.Config.FindDir(dirspec)
+ return result
+
+    def load(self, file):
+        group = []
+        with open(file, 'r') as f:
+            for n, line in enumerate(f):
+                valid, enabled, source, comment = self._parse(line)
+                group.append((n, valid, enabled, source, comment))
+        self.files[file] = group
+
+ def save(self):
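+        # Each file is rewritten through a temp file in the same directory
+        # and swapped in with atomic_move(), so a failure mid-write never
+        # leaves a truncated sources file behind.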
+ for filename, sources in list(self.files.items()):
+ if sources:
+ d, fn = os.path.split(filename)
+ try:
+ os.makedirs(d)
+ except OSError as ex:
+ if not os.path.isdir(d):
+ self.module.fail_json("Failed to create directory %s: %s" % (d, to_native(ex)))
+
+ try:
+ fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
+ except (OSError, IOError) as e:
+ self.module.fail_json(msg='Unable to create temp file at "%s" for apt source: %s' % (d, to_native(e)))
+
+ f = os.fdopen(fd, 'w')
+ for n, valid, enabled, source, comment in sources:
+ chunks = []
+ if not enabled:
+ chunks.append('# ')
+ chunks.append(source)
+ if comment:
+ chunks.append(' # ')
+ chunks.append(comment)
+ chunks.append('\n')
+ line = ''.join(chunks)
+
+ try:
+ f.write(line)
+ except IOError as ex:
+ self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(ex)))
+ self.module.atomic_move(tmp_path, filename)
+
+ # allow the user to override the default mode
+ if filename in self.new_repos:
+ this_mode = self.module.params.get('mode', DEFAULT_SOURCES_PERM)
+ self.module.set_mode_if_different(filename, this_mode, False)
+ else:
+ del self.files[filename]
+ if os.path.exists(filename):
+ os.remove(filename)
+
+ def dump(self):
+ dumpstruct = {}
+ for filename, sources in self.files.items():
+ if sources:
+ lines = []
+ for n, valid, enabled, source, comment in sources:
+ chunks = []
+ if not enabled:
+ chunks.append('# ')
+ chunks.append(source)
+ if comment:
+ chunks.append(' # ')
+ chunks.append(comment)
+ chunks.append('\n')
+ lines.append(''.join(chunks))
+ dumpstruct[filename] = ''.join(lines)
+ return dumpstruct
+
+ def _choice(self, new, old):
+ if new is None:
+ return old
+ return new
+
+ def modify(self, file, n, enabled=None, source=None, comment=None):
+ '''
+        This function is meant to be used with the iterator, so invalid sources need no special handling.
+        If source, enabled, or comment is None, the original value from line ``n`` is preserved.
+ '''
+ valid, enabled_old, source_old, comment_old = self.files[file][n][1:]
+ self.files[file][n] = (n, valid, self._choice(enabled, enabled_old), self._choice(source, source_old), self._choice(comment, comment_old))
+
+ def _add_valid_source(self, source_new, comment_new, file):
+ # We'll try to reuse disabled source if we have it.
+ # If we have more than one entry, we will enable them all - no advanced logic, remember.
+        self.module.log('adding source file: %s | %s | %s' % (source_new, comment_new, file))
+ found = False
+ for filename, n, enabled, source, comment in self:
+ if source == source_new:
+ self.modify(filename, n, enabled=True)
+ found = True
+
+ if not found:
+ if file is None:
+ file = self.default_file
+ else:
+ file = self._expand_path(file)
+
+ if file not in self.files:
+ self.files[file] = []
+
+ files = self.files[file]
+ files.append((len(files), True, True, source_new, comment_new))
+ self.new_repos.add(file)
+
+ def add_source(self, line, comment='', file=None):
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+
+ # Prefer separate files for new sources.
+ self._add_valid_source(source, comment, file=file or self._suggest_filename(source))
+
+ def _remove_valid_source(self, source):
+ # If we have more than one entry, we will remove them all (not comment, remove!)
+ for filename, n, enabled, src, comment in self:
+ if source == src and enabled:
+ self.files[filename].pop(n)
+
+ def remove_source(self, line):
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+ self._remove_valid_source(source)
+
+
+class UbuntuSourcesList(SourcesList):
+
+ LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s'
+
+ def __init__(self, module):
+ self.module = module
+ self.codename = module.params['codename'] or distro.codename
+ super(UbuntuSourcesList, self).__init__(module)
+
+ self.apt_key_bin = self.module.get_bin_path('apt-key', required=False)
+ self.gpg_bin = self.module.get_bin_path('gpg', required=False)
+ if not self.apt_key_bin and not self.gpg_bin:
+ self.module.fail_json(msg='Either apt-key or gpg binary is required, but neither could be found')
+
+ def __deepcopy__(self, memo=None):
+ return UbuntuSourcesList(self.module)
+
+ def _get_ppa_info(self, owner_name, ppa_name):
+ lp_api = self.LP_API % (owner_name, ppa_name)
+
+ headers = dict(Accept='application/json')
+ response, info = fetch_url(self.module, lp_api, headers=headers)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg'])
+ return json.loads(to_native(response.read()))
+
+ def _expand_ppa(self, path):
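+        # e.g. 'ppa:deadsnakes/ppa' expands (for codename 'jammy') to
+        # ('deb http://ppa.launchpad.net/deadsnakes/ppa/ubuntu jammy main',
+        #  'deadsnakes', 'ppa')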
+ ppa = path.split(':')[1]
+ ppa_owner = ppa.split('/')[0]
+ try:
+ ppa_name = ppa.split('/')[1]
+ except IndexError:
+ ppa_name = 'ppa'
+
+ line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, self.codename)
+ return line, ppa_owner, ppa_name
+
+ def _key_already_exists(self, key_fingerprint):
+
+ if self.apt_key_bin:
+ rc, out, err = self.module.run_command([self.apt_key_bin, 'export', key_fingerprint], check_rc=True)
+ found = len(err) == 0
+ else:
+ found = self._gpg_key_exists(key_fingerprint)
+
+ return found
+
+ def _gpg_key_exists(self, key_fingerprint):
+
+ found = False
+ keyfiles = ['/etc/apt/trusted.gpg'] # main gpg repo for apt
+ for other_dir in APT_KEY_DIRS:
+ # add other known sources of gpg sigs for apt, skip hidden files
+ keyfiles.extend([os.path.join(other_dir, x) for x in os.listdir(other_dir) if not x.startswith('.')])
+
+ for key_file in keyfiles:
+
+ if os.path.exists(key_file):
+ try:
+ rc, out, err = self.module.run_command([self.gpg_bin, '--list-packets', key_file])
+ except (IOError, OSError) as e:
+ self.debug("Could check key against file %s: %s" % (key_file, to_native(e)))
+ continue
+
+ if key_fingerprint in out:
+ found = True
+ break
+
+ return found
+
+ # https://www.linuxuprising.com/2021/01/apt-key-is-deprecated-how-to-add.html
+ def add_source(self, line, comment='', file=None):
+ if line.startswith('ppa:'):
+ source, ppa_owner, ppa_name = self._expand_ppa(line)
+
+ if source in self.repos_urls:
+ # repository already exists
+ return
+
+ info = self._get_ppa_info(ppa_owner, ppa_name)
+
+ # add gpg sig if needed
+ if not self._key_already_exists(info['signing_key_fingerprint']):
+
+ # TODO: report file that would have been added if not check_mode
+ keyfile = ''
+ if not self.module.check_mode:
+ if self.apt_key_bin:
+ command = [self.apt_key_bin, 'adv', '--recv-keys', '--no-tty', '--keyserver', 'hkp://keyserver.ubuntu.com:80',
+ info['signing_key_fingerprint']]
+ else:
+ # use first available key dir, in order of preference
+ for keydir in APT_KEY_DIRS:
+ if os.path.exists(keydir):
+ break
+ else:
+ self.module.fail_json("Unable to find any existing apt gpgp repo directories, tried the following: %s" % ', '.join(APT_KEY_DIRS))
+
+ keyfile = '%s/%s-%s-%s.gpg' % (keydir, os.path.basename(source).replace(' ', '-'), ppa_owner, ppa_name)
+ command = [self.gpg_bin, '--no-tty', '--keyserver', 'hkp://keyserver.ubuntu.com:80', '--export', info['signing_key_fingerprint']]
+
+ rc, stdout, stderr = self.module.run_command(command, check_rc=True, encoding=None)
+ if keyfile:
+ # using gpg we must write keyfile ourselves
+ if len(stdout) == 0:
+ self.module.fail_json(msg='Unable to get required signing key', rc=rc, stderr=stderr, command=command)
+ try:
+ with open(keyfile, 'wb') as f:
+ f.write(stdout)
+ self.module.log('Added repo key "%s" for apt to file "%s"' % (info['signing_key_fingerprint'], keyfile))
+ except (OSError, IOError) as e:
+                            self.module.fail_json(msg='Unable to write required signing key to "%s"' % keyfile, rc=rc, stderr=stderr, error=to_native(e))
+
+ # apt source file
+ file = file or self._suggest_filename('%s_%s' % (line, self.codename))
+ else:
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+ file = file or self._suggest_filename(source)
+
+ self._add_valid_source(source, comment, file)
+
+ def remove_source(self, line):
+ if line.startswith('ppa:'):
+ source = self._expand_ppa(line)[0]
+ else:
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+ self._remove_valid_source(source)
+
+ @property
+ def repos_urls(self):
+ _repositories = []
+ for parsed_repos in self.files.values():
+ for parsed_repo in parsed_repos:
+ valid = parsed_repo[1]
+ enabled = parsed_repo[2]
+ source_line = parsed_repo[3]
+
+ if not valid or not enabled:
+ continue
+
+ if source_line.startswith('ppa:'):
+ source, ppa_owner, ppa_name = self._expand_ppa(source_line)
+ _repositories.append(source)
+ else:
+ _repositories.append(source_line)
+
+ return _repositories
+
+
+def revert_sources_list(sources_before, sources_after, sourceslist_before):
+    '''Revert the sources list files to their previous state.'''
+
+ # First remove any new files that were created:
+ for filename in set(sources_after.keys()).difference(sources_before.keys()):
+ if os.path.exists(filename):
+ os.remove(filename)
+ # Now revert the existing files to their former state:
+ sourceslist_before.save()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ mode=dict(type='raw'),
+ update_cache=dict(type='bool', default=True, aliases=['update-cache']),
+ update_cache_retries=dict(type='int', default=5),
+ update_cache_retry_max_delay=dict(type='int', default=12),
+ filename=dict(type='str'),
+ # This should not be needed, but exists as a failsafe
+ install_python_apt=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ codename=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ repo = module.params['repo']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
+ # Note: mode is referenced in SourcesList class via the passed in module (self here)
+
+ sourceslist = None
+
+ if not HAVE_PYTHON_APT:
+        # This interpreter can't see the apt Python library - we'll do the following to try to fix that:
+        # 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
+        # 2) finding none, try to install a matching python-apt package for the current interpreter version;
+        #    we limit to the current interpreter version to try to avoid installing a whole other Python just
+        #    for apt support
+        # 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be
+        #    the current interpreter again, but we'll let it respawn anyway for simplicity)
+        # 4) if still not working, return an error and give up (some corner cases are not covered, but this shouldn't be
+        #    made any more complex than it already is to try to cover more, e.g. custom interpreters taking over
+        #    system locations)
+
+ apt_pkg_name = 'python3-apt' if PY3 else 'python-apt'
+
+ if has_respawned():
+ # this shouldn't be possible; short-circuit early if it happens...
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
+
+ interpreters = ['/usr/bin/python3', '/usr/bin/python2', '/usr/bin/python']
+
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+
+ # don't make changes if we're in check_mode
+ if module.check_mode:
+ module.fail_json(msg="%s must be installed to use check mode. "
+ "If run normally this module can auto-install it." % apt_pkg_name)
+
+ if params['install_python_apt']:
+ install_python_apt(module, apt_pkg_name)
+ else:
+ module.fail_json(msg='%s is not installed, and install_python_apt is False' % apt_pkg_name)
+
+ # try again to find the bindings in common places
+ interpreter = probe_interpreters_for_module(interpreters, 'apt')
+
+ if interpreter:
+ # found the Python bindings; respawn this module under the interpreter where we found them
+ # NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
+ respawn_module(interpreter)
+ # this is the end of the line for this process, it will exit here once the respawned module has completed
+ else:
+ # we've done all we can do; just tell the user it's busted and get out
+ module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
+
+ if not repo:
+ module.fail_json(msg='Please set argument \'repo\' to a non-empty value')
+
+ if isinstance(distro, aptsources_distro.Distribution):
+ sourceslist = UbuntuSourcesList(module)
+ else:
+ module.fail_json(msg='Module apt_repository is not supported on target.')
+
+ sourceslist_before = copy.deepcopy(sourceslist)
+ sources_before = sourceslist.dump()
+
+ try:
+ if state == 'present':
+ sourceslist.add_source(repo)
+ elif state == 'absent':
+ sourceslist.remove_source(repo)
+ except InvalidSource as ex:
+ module.fail_json(msg='Invalid repository string: %s' % to_native(ex))
+
+ sources_after = sourceslist.dump()
+ changed = sources_before != sources_after
+
+ if changed and module._diff:
+ diff = []
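+        # before_header/after_header fall back to /dev/null for files that
+        # exist on only one side, mirroring unified diff conventions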
+ for filename in set(sources_before.keys()).union(sources_after.keys()):
+ diff.append({'before': sources_before.get(filename, ''),
+ 'after': sources_after.get(filename, ''),
+ 'before_header': (filename, '/dev/null')[filename not in sources_before],
+ 'after_header': (filename, '/dev/null')[filename not in sources_after]})
+ else:
+ diff = {}
+
+ if changed and not module.check_mode:
+ try:
+ sourceslist.save()
+ if update_cache:
+ err = ''
+ update_cache_retries = module.params.get('update_cache_retries')
+ update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
+ randomize = random.randint(0, 1000) / 1000.0
+
+ for retry in range(update_cache_retries):
+ try:
+ cache = apt.Cache()
+ cache.update()
+ break
+ except apt.cache.FetchFailedException as e:
+ err = to_native(e)
+
+ # Use exponential backoff with a max fail count, plus a little bit of randomness
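+                        # With the defaults (5 retries, 12s cap) the failed
+                        # attempts sleep roughly 1, 2, 4, 8 and then 12 seconds
+                        # (16 capped to 12), plus a random jitter of up to one
+                        # second that is fixed for the whole run.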
+ delay = 2 ** retry + randomize
+ if delay > update_cache_retry_max_delay:
+ delay = update_cache_retry_max_delay + randomize
+ time.sleep(delay)
+ else:
+ revert_sources_list(sources_before, sources_after, sourceslist_before)
+ module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
+
+ except (OSError, IOError) as ex:
+ revert_sources_list(sources_before, sources_after, sourceslist_before)
+ module.fail_json(msg=to_native(ex))
+
+ module.exit_json(changed=changed, repo=repo, state=state, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/assemble.py b/lib/ansible/modules/assemble.py
new file mode 100644
index 0000000..2b443ce
--- /dev/null
+++ b/lib/ansible/modules/assemble.py
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
+# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: assemble
+short_description: Assemble configuration files from fragments
+description:
+- Assembles a configuration file from fragments.
+- Often a particular program takes a single configuration file and does not support a
+ C(conf.d) style structure where it is easy to build up the configuration
+ from multiple sources. C(assemble) will take a directory of files that can be
+ local or have already been transferred to the system, and concatenate them
+ together to produce a destination file.
+- Files are assembled in string sorting order.
+- Puppet calls this idea I(fragments).
+version_added: '0.5'
+options:
+ src:
+ description:
+ - An already existing directory full of source files.
+ type: path
+ required: true
+ dest:
+ description:
+ - A file to create using the concatenation of all of the source files.
+ type: path
+ required: true
+ backup:
+ description:
+ - Create a backup file (if C(true)), including the timestamp information so
+ you can get the original file back if you somehow clobbered it
+ incorrectly.
+ type: bool
+ default: no
+ delimiter:
+ description:
+ - A delimiter to separate the file contents.
+ type: str
+ version_added: '1.4'
+ remote_src:
+ description:
+ - If C(false), it will search for src at originating/master machine.
+ - If C(true), it will go to the remote/target machine for the src.
+ type: bool
+ default: yes
+ version_added: '1.4'
+ regexp:
+ description:
+  - Assemble files only if C(regexp) matches the filename.
+ - If not set, all files are assembled.
+ - Every C(\) (backslash) must be escaped as C(\\) to comply to YAML syntax.
+ - Uses L(Python regular expressions,https://docs.python.org/3/library/re.html).
+ type: str
+ ignore_hidden:
+ description:
+ - A boolean that controls if files that start with a '.' will be included or not.
+ type: bool
+ default: no
+ version_added: '2.0'
+ validate:
+ description:
+ - The validation command to run before copying into place.
+ - The path to the file to validate is passed in via '%s' which must be present as in the sshd example below.
+ - The command is passed securely so shell features like expansion and pipes won't work.
+ type: str
+ version_added: '2.0'
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: full
+ vault:
+ support: full
+ version_added: '2.2'
+seealso:
+- module: ansible.builtin.copy
+- module: ansible.builtin.template
+- module: ansible.windows.win_copy
+author:
+- Stephen Fromm (@sfromm)
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.flow
+ - action_common_attributes.files
+ - decrypt
+ - files
+'''
+
+EXAMPLES = r'''
+- name: Assemble from fragments from a directory
+ ansible.builtin.assemble:
+ src: /etc/someapp/fragments
+ dest: /etc/someapp/someapp.conf
+
+- name: Insert the provided delimiter between fragments
+ ansible.builtin.assemble:
+ src: /etc/someapp/fragments
+ dest: /etc/someapp/someapp.conf
+ delimiter: '### START FRAGMENT ###'
+
+- name: Assemble a new "sshd_config" file into place, after passing validation with sshd
+ ansible.builtin.assemble:
+ src: /etc/ssh/conf.d/
+ dest: /etc/ssh/sshd_config
+ validate: /usr/sbin/sshd -t -f %s
+'''
+
+RETURN = r'''#'''
+
+import codecs
+import os
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import b, indexbytes
+from ansible.module_utils._text import to_native
+
+
+def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, tmpdir=None):
+ ''' assemble a file from a directory of fragments '''
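+    # fragments are concatenated in plain string order, so (hypothetical
+    # names) '10-base.conf' sorts before '20-extra.conf', but '9-local.conf'
+    # sorts after both; zero-pad numeric prefixes to get numeric ordering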
+ tmpfd, temp_path = tempfile.mkstemp(dir=tmpdir)
+ tmp = os.fdopen(tmpfd, 'wb')
+ delimit_me = False
+ add_newline = False
+
+ for f in sorted(os.listdir(src_path)):
+ if compiled_regexp and not compiled_regexp.search(f):
+ continue
+ fragment = os.path.join(src_path, f)
+ if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
+ continue
+ with open(fragment, 'rb') as fragment_fh:
+ fragment_content = fragment_fh.read()
+
+ # always put a newline between fragments if the previous fragment didn't end with a newline.
+ if add_newline:
+ tmp.write(b('\n'))
+
+ # delimiters should only appear between fragments
+ if delimit_me:
+ if delimiter:
+ # un-escape anything like newlines
+ delimiter = codecs.escape_decode(delimiter)[0]
+ tmp.write(delimiter)
+ # always make sure there's a newline after the
+ # delimiter, so lines don't run together
+
+ # byte indexing differs on Python 2 and 3,
+ # use indexbytes for compat
+ # chr(10) == '\n'
+ if indexbytes(delimiter, -1) != 10:
+ tmp.write(b('\n'))
+
+ tmp.write(fragment_content)
+ delimit_me = True
+ if fragment_content.endswith(b('\n')):
+ add_newline = False
+ else:
+ add_newline = True
+
+ tmp.close()
+ return temp_path
+
+
+def cleanup(path, result=None):
+ # cleanup just in case
+ if os.path.exists(path):
+ try:
+ os.remove(path)
+ except (IOError, OSError) as e:
+ # don't error on possible race conditions, but keep warning
+ if result is not None:
+ result['warnings'] = ['Unable to remove temp file (%s): %s' % (path, to_native(e))]
+
+
+def main():
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ src=dict(type='path', required=True),
+ delimiter=dict(type='str'),
+ dest=dict(type='path', required=True),
+ backup=dict(type='bool', default=False),
+ remote_src=dict(type='bool', default=True),
+ regexp=dict(type='str'),
+ ignore_hidden=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ ),
+ add_file_common_args=True,
+ )
+
+ changed = False
+ path_hash = None
+ dest_hash = None
+ src = module.params['src']
+ dest = module.params['dest']
+ backup = module.params['backup']
+ delimiter = module.params['delimiter']
+ regexp = module.params['regexp']
+ compiled_regexp = None
+ ignore_hidden = module.params['ignore_hidden']
+ validate = module.params.get('validate', None)
+
+ result = dict(src=src, dest=dest)
+ if not os.path.exists(src):
+ module.fail_json(msg="Source (%s) does not exist" % src)
+
+ if not os.path.isdir(src):
+ module.fail_json(msg="Source (%s) is not a directory" % src)
+
+ if regexp is not None:
+ try:
+ compiled_regexp = re.compile(regexp)
+ except re.error as e:
+ module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (to_native(e), regexp))
+
+ if validate and "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % validate)
+
+ path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden, module.tmpdir)
+ path_hash = module.sha1(path)
+ result['checksum'] = path_hash
+
+ # Backwards compat. This won't return data if FIPS mode is active
+ try:
+ pathmd5 = module.md5(path)
+ except ValueError:
+ pathmd5 = None
+ result['md5sum'] = pathmd5
+
+ if os.path.exists(dest):
+ dest_hash = module.sha1(dest)
+
+ if path_hash != dest_hash:
+ if validate:
+ (rc, out, err) = module.run_command(validate % path)
+ result['validation'] = dict(rc=rc, stdout=out, stderr=err)
+ if rc != 0:
+ cleanup(path)
+ module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err))
+ if backup and dest_hash is not None:
+ result['backup_file'] = module.backup_local(dest)
+
+ module.atomic_move(path, dest, unsafe_writes=module.params['unsafe_writes'])
+ changed = True
+
+ cleanup(path, result)
+
+ # handle file permissions
+ file_args = module.load_file_common_arguments(module.params)
+ result['changed'] = module.set_fs_attributes_if_different(file_args, changed)
+
+ # Mission complete
+ result['msg'] = "OK"
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/assert.py b/lib/ansible/modules/assert.py
new file mode 100644
index 0000000..0ef5eb0
--- /dev/null
+++ b/lib/ansible/modules/assert.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: assert
+short_description: Asserts given expressions are true
+description:
+ - This module asserts that given expressions are true with an optional custom message.
+ - This module is also supported for Windows targets.
+version_added: "1.5"
+options:
+ that:
+ description:
+ - A list of string expressions of the same form that can be passed to the 'when' statement.
+ type: list
+ elements: str
+ required: true
+ fail_msg:
+ description:
+ - The customized message used for a failing assertion.
+    - This argument was called 'msg' before Ansible 2.7; it was renamed to 'fail_msg' with the alias 'msg'.
+ type: str
+ aliases: [ msg ]
+ version_added: "2.7"
+ success_msg:
+ description:
+ - The customized message used for a successful assertion.
+ type: str
+ version_added: "2.7"
+ quiet:
+ description:
+ - Set this to C(true) to avoid verbose output.
+ type: bool
+ default: no
+ version_added: "2.8"
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ become:
+ support: none
+ bypass_host_loop:
+ support: none
+ connection:
+ support: none
+ check_mode:
+ support: full
+ delegation:
+ support: none
+ details: Aside from C(register) and/or in combination with C(delegate_facts), it has little effect.
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+seealso:
+- module: ansible.builtin.debug
+- module: ansible.builtin.fail
+- module: ansible.builtin.meta
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- ansible.builtin.assert: { that: "ansible_os_family != 'RedHat'" }
+
+- ansible.builtin.assert:
+ that:
+ - "'foo' in some_command_result.stdout"
+ - number_of_the_counting == 3
+
+- name: After version 2.7 both 'msg' and 'fail_msg' can customize failing assertion message
+ ansible.builtin.assert:
+ that:
+ - my_param <= 100
+ - my_param >= 0
+ fail_msg: "'my_param' must be between 0 and 100"
+ success_msg: "'my_param' is between 0 and 100"
+
+- name: Use 'msg' when the Ansible version is older than 2.7
+ ansible.builtin.assert:
+ that:
+ - my_param <= 100
+ - my_param >= 0
+ msg: "'my_param' must be between 0 and 100"
+
+- name: Use quiet to avoid verbose output
+ ansible.builtin.assert:
+ that:
+ - my_param <= 100
+ - my_param >= 0
+ quiet: true
+'''
diff --git a/lib/ansible/modules/async_status.py b/lib/ansible/modules/async_status.py
new file mode 100644
index 0000000..3609c46
--- /dev/null
+++ b/lib/ansible/modules/async_status.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: async_status
+short_description: Obtain status of asynchronous task
+description:
+- This module gets the status of an asynchronous task.
+- This module is also supported for Windows targets.
+version_added: "0.5"
+options:
+ jid:
+ description:
+ - Job or task identifier
+ type: str
+ required: true
+ mode:
+ description:
+ - If C(status), obtain the status.
+ - If C(cleanup), clean up the async job cache (by default in C(~/.ansible_async/)) for the specified job I(jid).
+ type: str
+ choices: [ cleanup, status ]
+ default: status
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ bypass_host_loop:
+ support: none
+ platform:
+ support: full
+ platforms: posix, windows
+seealso:
+- ref: playbooks_async
+ description: Detailed information on how to use asynchronous actions and polling.
+author:
+- Ansible Core Team
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+---
+- name: Asynchronous yum task
+ ansible.builtin.yum:
+ name: docker-io
+ state: present
+ async: 1000
+ poll: 0
+ register: yum_sleeper
+
+- name: Wait for asynchronous job to end
+ ansible.builtin.async_status:
+ jid: '{{ yum_sleeper.ansible_job_id }}'
+ register: job_result
+ until: job_result.finished
+ retries: 100
+ delay: 10
+'''
+
+RETURN = r'''
+ansible_job_id:
+ description: The asynchronous job id
+ returned: success
+ type: str
+ sample: '360874038559.4169'
+finished:
+ description: Whether the asynchronous job has finished (C(1)) or not (C(0))
+ returned: always
+ type: int
+ sample: 1
+started:
+ description: Whether the asynchronous job has started (C(1)) or not (C(0))
+ returned: always
+ type: int
+ sample: 1
+stdout:
+ description: Any output returned by async_wrapper
+ returned: always
+ type: str
+stderr:
+ description: Any errors returned by async_wrapper
+ returned: always
+ type: str
+erased:
+ description: Path to erased job file
+ returned: when file is erased
+ type: str
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+
+def main():
+
+ module = AnsibleModule(argument_spec=dict(
+ jid=dict(type='str', required=True),
+ mode=dict(type='str', default='status', choices=['cleanup', 'status']),
+ # passed in from the async_status action plugin
+ _async_dir=dict(type='path', required=True),
+ ))
+
+ mode = module.params['mode']
+ jid = module.params['jid']
+ async_dir = module.params['_async_dir']
+
+ # setup logging directory
+ logdir = os.path.expanduser(async_dir)
+ log_path = os.path.join(logdir, jid)
+
+ if not os.path.exists(log_path):
+ module.fail_json(msg="could not find job", ansible_job_id=jid, started=1, finished=1)
+
+ if mode == 'cleanup':
+ os.unlink(log_path)
+ module.exit_json(ansible_job_id=jid, erased=log_path)
+
+ # NOT in cleanup mode, assume regular status mode
+ # no remote kill mode currently exists, but probably should
+ # consider log_path + ".pid" file and also unlink that above
+
+    data = None
+    try:
+        with open(log_path) as f:
+            data = f.read()
+        data = json.loads(data)
+    except Exception:
+        if not data:
+            # file not written yet? That means it is running
+            module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0)
+        else:
+            module.fail_json(ansible_job_id=jid, results_file=log_path,
+                             msg="Could not parse job output: %s" % data, started=1, finished=1)
+
+ if 'started' not in data:
+ data['finished'] = 1
+ data['ansible_job_id'] = jid
+ elif 'finished' not in data:
+ data['finished'] = 0
+
+ # Fix error: TypeError: exit_json() keywords must be strings
+ data = {to_native(k): v for k, v in iteritems(data)}
+
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/async_wrapper.py b/lib/ansible/modules/async_wrapper.py
new file mode 100644
index 0000000..4b1a5b3
--- /dev/null
+++ b/lib/ansible/modules/async_wrapper.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import errno
+import json
+import shlex
+import shutil
+import os
+import subprocess
+import sys
+import traceback
+import signal
+import time
+import syslog
+import multiprocessing
+
+from ansible.module_utils._text import to_text, to_bytes
+
+PY3 = sys.version_info[0] == 3
+
+syslog.openlog('ansible-%s' % os.path.basename(__file__))
+syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
+
+# pipe for communication between forked process and parent
+ipc_watcher, ipc_notifier = multiprocessing.Pipe()
+
+job_path = ''
+
+
+def notice(msg):
+ syslog.syslog(syslog.LOG_NOTICE, msg)
+
+
+def end(res=None, exit_msg=0):
+ if res is not None:
+ print(json.dumps(res))
+ sys.stdout.flush()
+ sys.exit(exit_msg)
+
+
+def daemonize_self():
+ # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
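+    # classic double fork: the first fork lets the caller return, setsid()
+    # detaches from the controlling terminal, and the second fork ensures
+    # the daemon can never re-acquire one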
+ try:
+ pid = os.fork()
+ if pid > 0:
+ # exit first parent
+ end()
+ except OSError:
+ e = sys.exc_info()[1]
+ end({'msg': "fork #1 failed: %d (%s)\n" % (e.errno, e.strerror), 'failed': True}, 1)
+
+    # decouple from parent environment (we do not chdir to '/' so the directory context stays the same as for non-async tasks)
+ os.setsid()
+ os.umask(int('022', 8))
+
+ # do second fork
+ try:
+ pid = os.fork()
+ if pid > 0:
+ # TODO: print 'async_wrapper_pid': pid, but careful as it will pollute expected output.
+ end()
+ except OSError:
+ e = sys.exc_info()[1]
+ end({'msg': "fork #2 failed: %d (%s)\n" % (e.errno, e.strerror), 'failed': True}, 1)
+
+ dev_null = open('/dev/null', 'w')
+ os.dup2(dev_null.fileno(), sys.stdin.fileno())
+ os.dup2(dev_null.fileno(), sys.stdout.fileno())
+ os.dup2(dev_null.fileno(), sys.stderr.fileno())
+
+
+# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
+# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
+def _filter_non_json_lines(data):
+ '''
+ Used to filter unrelated output around module JSON output, like messages from
+ tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
+
+ Filters leading lines before first line-starting occurrence of '{', and filter all
+ trailing lines after matching close character (working from the bottom of output).
+ '''
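+    # e.g. 'banner line\n{"changed": false}\ntrailing noise' yields
+    # ('{"changed": false}', ['Module invocation had junk after the JSON data: trailing noise'])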
+ warnings = []
+
+ # Filter initial junk
+ lines = data.splitlines()
+
+ for start, line in enumerate(lines):
+ line = line.strip()
+ if line.startswith(u'{'):
+ break
+ else:
+ raise ValueError('No start of json char found')
+
+ # Filter trailing junk
+ lines = lines[start:]
+
+ for reverse_end_offset, line in enumerate(reversed(lines)):
+ if line.strip().endswith(u'}'):
+ break
+ else:
+ raise ValueError('No end of json char found')
+
+ if reverse_end_offset > 0:
+ # Trailing junk is uncommon and can point to things the user might
+ # want to change. So print a warning if we find any
+ trailing_junk = lines[len(lines) - reverse_end_offset:]
+ warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
+
+ lines = lines[:(len(lines) - reverse_end_offset)]
+
+ return ('\n'.join(lines), warnings)
+
+
+def _get_interpreter(module_path):
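+    # read the shebang so non-Python wrapped modules can be launched via their
+    # declared interpreter, e.g. b'#!/bin/sh -e\n' -> [b'/bin/sh', b'-e']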
+ with open(module_path, 'rb') as module_fd:
+ head = module_fd.read(1024)
+ if head[0:2] != b'#!':
+ return None
+ return head[2:head.index(b'\n')].strip().split(b' ')
+
+
+def _make_temp_dir(path):
+ # TODO: Add checks for permissions on path.
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def jwrite(info):
+
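+    # write to a sibling temp file and rename over the job file so that
+    # async_status never observes a partially written JSON document
+    # (rename is atomic on POSIX filesystems)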
+ jobfile = job_path + ".tmp"
+ tjob = open(jobfile, "w")
+ try:
+ tjob.write(json.dumps(info))
+ except (IOError, OSError) as e:
+ notice('failed to write to %s: %s' % (jobfile, str(e)))
+ raise e
+ finally:
+ tjob.close()
+ os.rename(jobfile, job_path)
+
+
+def _run_module(wrapped_cmd, jid):
+
+ jwrite({"started": 1, "finished": 0, "ansible_job_id": jid})
+
+ result = {}
+
+ # signal grandchild process started and isolated from being terminated
+ # by the connection being closed sending a signal to the job group
+ ipc_notifier.send(True)
+ ipc_notifier.close()
+
+ outdata = ''
+ filtered_outdata = ''
+ stderr = ''
+ try:
+ cmd = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(wrapped_cmd)]
+ # call the module interpreter directly (for non-binary modules)
+ # this permits use of a script for an interpreter on non-Linux platforms
+ interpreter = _get_interpreter(cmd[0])
+ if interpreter:
+ cmd = interpreter + cmd
+ script = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ (outdata, stderr) = script.communicate()
+ if PY3:
+ outdata = outdata.decode('utf-8', 'surrogateescape')
+ stderr = stderr.decode('utf-8', 'surrogateescape')
+
+ (filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)
+
+ result = json.loads(filtered_outdata)
+
+ if json_warnings:
+ # merge JSON junk warnings with any existing module warnings
+ module_warnings = result.get('warnings', [])
+ if not isinstance(module_warnings, list):
+ module_warnings = [module_warnings]
+ module_warnings.extend(json_warnings)
+ result['warnings'] = module_warnings
+
+ if stderr:
+ result['stderr'] = stderr
+ jwrite(result)
+
+ except (OSError, IOError):
+ e = sys.exc_info()[1]
+ result = {
+ "failed": 1,
+ "cmd": wrapped_cmd,
+ "msg": to_text(e),
+ "outdata": outdata, # temporary notice only
+ "stderr": stderr
+ }
+ result['ansible_job_id'] = jid
+ jwrite(result)
+
+ except (ValueError, Exception):
+ result = {
+ "failed": 1,
+ "cmd": wrapped_cmd,
+ "data": outdata, # temporary notice only
+ "stderr": stderr,
+ "msg": traceback.format_exc()
+ }
+ result['ansible_job_id'] = jid
+ jwrite(result)
+
+
+def main():
+ if len(sys.argv) < 5:
+ end({
+ "failed": True,
+ "msg": "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp] "
+ "Humans, do not call directly!"
+ }, 1)
+
+ jid = "%s.%d" % (sys.argv[1], os.getpid())
+ time_limit = sys.argv[2]
+ wrapped_module = sys.argv[3]
+ argsfile = sys.argv[4]
+ if '-tmp-' not in os.path.dirname(wrapped_module):
+ preserve_tmp = True
+ elif len(sys.argv) > 5:
+ preserve_tmp = sys.argv[5] == '-preserve_tmp'
+ else:
+ preserve_tmp = False
+ # consider underscore as no argsfile so we can support passing of additional positional parameters
+ if argsfile != '_':
+ cmd = "%s %s" % (wrapped_module, argsfile)
+ else:
+ cmd = wrapped_module
+ step = 5
+
+ async_dir = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
+
+ # setup job output directory
+ jobdir = os.path.expanduser(async_dir)
+ global job_path
+ job_path = os.path.join(jobdir, jid)
+
+ try:
+ _make_temp_dir(jobdir)
+ except Exception as e:
+ end({
+ "failed": 1,
+ "msg": "could not create directory: %s - %s" % (jobdir, to_text(e)),
+ "exception": to_text(traceback.format_exc()),
+ }, 1)
+
+ # immediately exit this process, leaving an orphaned process
+ # running which immediately forks a supervisory timing process
+
+ try:
+ pid = os.fork()
+ if pid:
+ # Notify the overlord that the async process started
+
+ # we need to not return immediately such that the launched command has an attempt
+ # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
+ # this probably could be done with some IPC later. Modules should always read
+ # the argsfile at the very first start of their execution anyway
+
+ # close off notifier handle in grandparent, probably unnecessary as
+ # this process doesn't hang around long enough
+ ipc_notifier.close()
+
+            # allow waiting up to 2.5 seconds in total; this should be long enough
+            # for even the most heavily loaded environments in practice.
+ retries = 25
+ while retries > 0:
+ if ipc_watcher.poll(0.1):
+ break
+ else:
+ retries = retries - 1
+ continue
+
+ notice("Return async_wrapper task started.")
+ end({"failed": 0, "started": 1, "finished": 0, "ansible_job_id": jid, "results_file": job_path,
+ "_ansible_suppress_tmpdir_delete": (not preserve_tmp)}, 0)
+ else:
+ # The actual wrapper process
+
+ # close off the receiving end of the pipe from child process
+ ipc_watcher.close()
+
+ # Daemonize, so we keep on running
+ daemonize_self()
+
+ # we are now daemonized, create a supervisory process
+ notice("Starting module and watcher")
+
+ sub_pid = os.fork()
+ if sub_pid:
+ # close off inherited pipe handles
+ ipc_watcher.close()
+ ipc_notifier.close()
+
+ # the parent stops the process after the time limit
+ remaining = int(time_limit)
+
+ # set the child process group id to kill all children
+ os.setpgid(sub_pid, sub_pid)
+
+ notice("Start watching %s (%s)" % (sub_pid, remaining))
+ time.sleep(step)
+ while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
+ notice("%s still running (%s)" % (sub_pid, remaining))
+ time.sleep(step)
+ remaining = remaining - step
+ if remaining <= 0:
+ # ensure we leave response in poll location
+ res = {'msg': 'Timeout exceeded', 'failed': True, 'child_pid': sub_pid}
+ jwrite(res)
+
+ # actually kill it
+ notice("Timeout reached, now killing %s" % (sub_pid))
+ os.killpg(sub_pid, signal.SIGKILL)
+ notice("Sent kill to group %s " % sub_pid)
+ time.sleep(1)
+ if not preserve_tmp:
+ shutil.rmtree(os.path.dirname(wrapped_module), True)
+ end(res)
+ notice("Done in kid B.")
+ if not preserve_tmp:
+ shutil.rmtree(os.path.dirname(wrapped_module), True)
+ end()
+ else:
+ # the child process runs the actual module
+ notice("Start module (%s)" % os.getpid())
+ _run_module(cmd, jid)
+ notice("Module complete (%s)" % os.getpid())
+
+ except Exception as e:
+ notice("error: %s" % e)
+ end({"failed": True, "msg": "FATAL ERROR: %s" % e}, "async_wrapper exited prematurely")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/blockinfile.py b/lib/ansible/modules/blockinfile.py
new file mode 100644
index 0000000..63fc021
--- /dev/null
+++ b/lib/ansible/modules/blockinfile.py
@@ -0,0 +1,387 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, 2015 YAEGASHI Takeshi <yaegashi@debian.org>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: blockinfile
+short_description: Insert/update/remove a text block surrounded by marker lines
+version_added: '2.0'
+description:
+- This module will insert/update/remove a block of multi-line text surrounded by customizable marker lines.
+author:
+- Yaegashi Takeshi (@yaegashi)
+options:
+ path:
+ description:
+ - The file to modify.
+ - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
+ type: path
+ required: yes
+ aliases: [ dest, destfile, name ]
+ state:
+ description:
+ - Whether the block should be there or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ marker:
+ description:
+ - The marker line template.
+ - C({mark}) will be replaced with the values in C(marker_begin) (default="BEGIN") and C(marker_end) (default="END").
+ - Using a custom marker without the C({mark}) variable may result in the block being repeatedly inserted on subsequent playbook runs.
+ - Multi-line markers are not supported and will result in the block being repeatedly inserted on subsequent playbook runs.
+ - A newline is automatically appended by the module to C(marker_begin) and C(marker_end).
+ type: str
+ default: '# {mark} ANSIBLE MANAGED BLOCK'
+ block:
+ description:
+ - The text to insert inside the marker lines.
+ - If it is missing or an empty string, the block will be removed as if C(state) were specified to C(absent).
+ type: str
+ default: ''
+ aliases: [ content ]
+ insertafter:
+ description:
+ - If specified and no begin/ending C(marker) lines are found, the block will be inserted after the last match of specified regular expression.
+ - A special value is available; C(EOF) for inserting the block at the end of the file.
+ - If specified regular expression has no matches, C(EOF) will be used instead.
+ - The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines.
+ This behaviour was added in ansible-core 2.14.
+ type: str
+ choices: [ EOF, '*regex*' ]
+ default: EOF
+ insertbefore:
+ description:
+ - If specified and no begin/ending C(marker) lines are found, the block will be inserted before the last match of specified regular expression.
+ - A special value is available; C(BOF) for inserting the block at the beginning of the file.
+ - If specified regular expression has no matches, the block will be inserted at the end of the file.
+ - The presence of the multiline flag (?m) in the regular expression controls whether the match is done line by line or with multiple lines.
+ This behaviour was added in ansible-core 2.14.
+ type: str
+ choices: [ BOF, '*regex*' ]
+ create:
+ description:
+ - Create a new file if it does not exist.
+ type: bool
+ default: no
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ marker_begin:
+ description:
+ - This will be inserted at C({mark}) in the opening ansible block marker.
+ type: str
+ default: BEGIN
+ version_added: '2.5'
+ marker_end:
+ required: false
+ description:
+ - This will be inserted at C({mark}) in the closing ansible block marker.
+ type: str
+ default: END
+ version_added: '2.5'
+notes:
+ - When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+ - Option I(follow) has been removed in Ansible 2.5, because this module modifies the contents of the file so I(follow=no) doesn't make sense.
+  - When more than one block should be handled in the same file, you must change the I(marker) per task.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.files
+ - files
+ - validate
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ safe_file_operations:
+ support: full
+ platform:
+ support: full
+ platforms: posix
+ vault:
+ support: none
+'''
+
+EXAMPLES = r'''
+# Before Ansible 2.3, option 'dest' or 'name' was used instead of 'path'
+- name: Insert/Update "Match User" configuration block in /etc/ssh/sshd_config
+ ansible.builtin.blockinfile:
+ path: /etc/ssh/sshd_config
+ block: |
+ Match User ansible-agent
+ PasswordAuthentication no
+
+- name: Insert/Update eth0 configuration stanza in /etc/network/interfaces
+ (it might be better to copy files into /etc/network/interfaces.d/)
+ ansible.builtin.blockinfile:
+ path: /etc/network/interfaces
+ block: |
+ iface eth0 inet static
+ address 192.0.2.23
+ netmask 255.255.255.0
+
+- name: Insert/Update configuration using a local file and validate it
+ ansible.builtin.blockinfile:
+ block: "{{ lookup('ansible.builtin.file', './local/sshd_config') }}"
+ path: /etc/ssh/sshd_config
+ backup: yes
+ validate: /usr/sbin/sshd -T -f %s
+
+- name: Insert/Update HTML surrounded by custom markers after <body> line
+ ansible.builtin.blockinfile:
+ path: /var/www/html/index.html
+ marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
+ insertafter: "<body>"
+ block: |
+ <h1>Welcome to {{ ansible_hostname }}</h1>
+ <p>Last updated on {{ ansible_date_time.iso8601 }}</p>
+
+- name: Remove HTML as well as surrounding markers
+ ansible.builtin.blockinfile:
+ path: /var/www/html/index.html
+ marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
+ block: ""
+
+- name: Add mappings to /etc/hosts
+ ansible.builtin.blockinfile:
+ path: /etc/hosts
+ block: |
+ {{ item.ip }} {{ item.name }}
+ marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}"
+ loop:
+ - { name: host1, ip: 10.10.1.10 }
+ - { name: host2, ip: 10.10.1.11 }
+ - { name: host3, ip: 10.10.1.12 }
+
+- name: Search with a multiline search flags regex and if found insert after
+ blockinfile:
+ path: listener.ora
+ block: "{{ listener_line | indent(width=8, first=True) }}"
+ insertafter: '(?m)SID_LIST_LISTENER_DG =\n.*\(SID_LIST ='
+ marker: " <!-- {mark} ANSIBLE MANAGED BLOCK -->"
+
+'''
+
+import re
+import os
+import tempfile
+from ansible.module_utils.six import b
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def write_changes(module, contents, path):
+
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(contents)
+ f.close()
+
+ validate = module.params.get('validate', None)
+ valid = not validate
+ if validate:
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(validate % tmpfile)
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc, err))
+ if valid:
+ module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
+
+
+def check_file_attrs(module, changed, message, diff):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_file_attributes_if_different(file_args, False, diff=diff):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ marker=dict(type='str', default='# {mark} ANSIBLE MANAGED BLOCK'),
+ block=dict(type='str', default='', aliases=['content']),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ create=dict(type='bool', default=False),
+ backup=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ marker_begin=dict(type='str', default='BEGIN'),
+ marker_end=dict(type='str', default='END'),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+ params = module.params
+ path = params['path']
+
+ if os.path.isdir(path):
+ module.fail_json(rc=256,
+                        msg='Path %s is a directory!' % path)
+
+ path_exists = os.path.exists(path)
+ if not path_exists:
+ if not module.boolean(params['create']):
+ module.fail_json(rc=257,
+                            msg='Path %s does not exist!' % path)
+ destpath = os.path.dirname(path)
+ if not os.path.exists(destpath) and not module.check_mode:
+ try:
+ os.makedirs(destpath)
+ except Exception as e:
+                module.fail_json(msg='Error creating %s: %s' % (destpath, to_native(e)))
+ original = None
+ lines = []
+ else:
+ with open(path, 'rb') as f:
+ original = f.read()
+ lines = original.splitlines(True)
+
+ diff = {'before': '',
+ 'after': '',
+ 'before_header': '%s (content)' % path,
+ 'after_header': '%s (content)' % path}
+
+ if module._diff and original:
+ diff['before'] = original
+
+ insertbefore = params['insertbefore']
+ insertafter = params['insertafter']
+ block = to_bytes(params['block'])
+ marker = to_bytes(params['marker'])
+ present = params['state'] == 'present'
+
+ if not present and not path_exists:
+ module.exit_json(changed=False, msg="File %s not present" % path)
+
+ if insertbefore is None and insertafter is None:
+ insertafter = 'EOF'
+
+ if insertafter not in (None, 'EOF'):
+ insertre = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
+ elif insertbefore not in (None, 'BOF'):
+ insertre = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
+ else:
+ insertre = None
+
+ marker0 = re.sub(b(r'{mark}'), b(params['marker_begin']), marker) + b(os.linesep)
+ marker1 = re.sub(b(r'{mark}'), b(params['marker_end']), marker) + b(os.linesep)
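+    # with the default marker template these become
+    # '# BEGIN ANSIBLE MANAGED BLOCK' and '# END ANSIBLE MANAGED BLOCK',
+    # each terminated with os.linesep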
+ if present and block:
+ if not block.endswith(b(os.linesep)):
+ block += b(os.linesep)
+ blocklines = [marker0] + block.splitlines(True) + [marker1]
+ else:
+ blocklines = []
+
+ n0 = n1 = None
+ for i, line in enumerate(lines):
+ if line == marker0:
+ n0 = i
+ if line == marker1:
+ n1 = i
+
+ if None in (n0, n1):
+ n0 = None
+ if insertre is not None:
+ if insertre.flags & re.MULTILINE:
+ match = insertre.search(original)
+ if match:
+ if insertafter:
+ n0 = to_native(original).count('\n', 0, match.end())
+ elif insertbefore:
+ n0 = to_native(original).count('\n', 0, match.start())
+ else:
+ for i, line in enumerate(lines):
+ if insertre.search(line):
+ n0 = i
+ if n0 is None:
+ n0 = len(lines)
+ elif insertafter is not None:
+ n0 += 1
+ elif insertbefore is not None:
+ n0 = 0 # insertbefore=BOF
+ else:
+ n0 = len(lines) # insertafter=EOF
+ elif n0 < n1:
+ lines[n0:n1 + 1] = []
+ else:
+ lines[n1:n0 + 1] = []
+ n0 = n1
+
+ # Ensure there is a line separator before the block of lines to be inserted
+ if n0 > 0:
+ if not lines[n0 - 1].endswith(b(os.linesep)):
+ lines[n0 - 1] += b(os.linesep)
+
+ lines[n0:n0] = blocklines
+ if lines:
+ result = b''.join(lines)
+ else:
+ result = b''
+
+ if module._diff:
+ diff['after'] = result
+
+ if original == result:
+ msg = ''
+ changed = False
+ elif original is None:
+ msg = 'File created'
+ changed = True
+ elif not blocklines:
+ msg = 'Block removed'
+ changed = True
+ else:
+ msg = 'Block inserted'
+ changed = True
+
+ backup_file = None
+ if changed and not module.check_mode:
+ if module.boolean(params['backup']) and path_exists:
+ backup_file = module.backup_local(path)
+ # We should always follow symlinks so that we change the real file
+ real_path = os.path.realpath(params['path'])
+ write_changes(module, result, real_path)
+
+ if module.check_mode and not path_exists:
+ module.exit_json(changed=changed, msg=msg, diff=diff)
+
+ attr_diff = {}
+ msg, changed = check_file_attrs(module, changed, msg, attr_diff)
+
+ attr_diff['before_header'] = '%s (file attributes)' % path
+ attr_diff['after_header'] = '%s (file attributes)' % path
+
+ difflist = [diff, attr_diff]
+
+ if backup_file is None:
+ module.exit_json(changed=changed, msg=msg, diff=difflist)
+ else:
+ module.exit_json(changed=changed, msg=msg, diff=difflist, backup_file=backup_file)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/command.py b/lib/ansible/modules/command.py
new file mode 100644
index 0000000..490c0ca
--- /dev/null
+++ b/lib/ansible/modules/command.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: command
+short_description: Execute commands on targets
+version_added: historical
+description:
+ - The C(command) module takes the command name followed by a list of space-delimited arguments.
+ - The given command will be executed on all selected nodes.
+ - The command(s) will not be
+ processed through the shell, so variables like C($HOSTNAME) and operations
+ like C("*"), C("<"), C(">"), C("|"), C(";") and C("&") will not work.
+ Use the M(ansible.builtin.shell) module if you need these features.
+ - To create C(command) tasks that are easier to read than the ones using space-delimited
+ arguments, pass parameters using the C(args) L(task keyword,https://docs.ansible.com/ansible/latest/reference_appendices/playbooks_keywords.html#task)
+ or use C(cmd) parameter.
+ - Either a free form command or C(cmd) parameter is required, see the examples.
+ - For Windows targets, use the M(ansible.windows.win_command) module instead.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.raw
+attributes:
+ check_mode:
+ details: while the command itself is arbitrary and cannot be subject to the check mode semantics it adds C(creates)/C(removes) options as a workaround
+ support: partial
+ diff_mode:
+ support: none
+ platform:
+ support: full
+ platforms: posix
+ raw:
+ support: full
+options:
+ free_form:
+ description:
+ - The command module takes a free form string as a command to run.
+ - There is no actual parameter named 'free form'.
+ cmd:
+ type: str
+ description:
+ - The command to run.
+ argv:
+ type: list
+ elements: str
+ description:
+ - Passes the command as a list rather than a string.
+ - Use C(argv) to avoid quoting values that would otherwise be interpreted incorrectly (for example "user name").
+ - Only the string (free form) or the list (argv) form can be provided, not both. One or the other must be provided.
+ version_added: "2.6"
+ creates:
+ type: path
+ description:
+ - A filename or (since 2.0) glob pattern. If a matching file already exists, this step B(will not) be run.
+ - This is checked before I(removes) is checked.
+ removes:
+ type: path
+ description:
+ - A filename or (since 2.0) glob pattern. If a matching file exists, this step B(will) be run.
+ - This is checked after I(creates) is checked.
+ version_added: "0.8"
+ chdir:
+ type: path
+ description:
+ - Change into this directory before running the command.
+ version_added: "0.6"
+ stdin:
+ description:
+ - Set the stdin of the command directly to the specified value.
+ type: str
+ version_added: "2.4"
+ stdin_add_newline:
+ type: bool
+ default: yes
+ description:
+ - If set to C(true), append a newline to stdin data.
+ version_added: "2.8"
+ strip_empty_ends:
+ description:
+ - Strip empty lines from the end of stdout/stderr in result.
+ version_added: "2.8"
+ type: bool
+ default: yes
+notes:
+ - If you want to run a command through the shell (say you are using C(<), C(>), C(|), and so on),
+ you actually want the M(ansible.builtin.shell) module instead.
+ Parsing shell metacharacters can lead to unexpected commands being executed if quoting is not done correctly, so it is more secure to
+ use the C(command) module when possible.
+ - C(creates), C(removes), and C(chdir) can be specified after the command.
+ For instance, if you only want to run a command when a certain file does not exist, use C(creates).
+ - Check mode is supported when passing C(creates) or C(removes). If running in check mode and either of these are specified, the module will
+ check for the existence of the file and report the correct changed status. If these are not supplied, the task will be skipped.
+ - The C(executable) parameter was removed in version 2.4. If you need this parameter, use the M(ansible.builtin.shell) module instead.
+ - For Windows targets, use the M(ansible.windows.win_command) module instead.
+ - For rebooting systems, use the M(ansible.builtin.reboot) or M(ansible.windows.win_reboot) module.
+seealso:
+- module: ansible.builtin.raw
+- module: ansible.builtin.script
+- module: ansible.builtin.shell
+- module: ansible.windows.win_command
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Return motd to registered var
+ ansible.builtin.command: cat /etc/motd
+ register: mymotd
+
+# free-form (string) arguments, all arguments on one line
+- name: Run command if /path/to/database does not exist (without 'args')
+ ansible.builtin.command: /usr/bin/make_database.sh db_user db_name creates=/path/to/database
+
+# free-form (string) arguments, some arguments on separate lines with the 'args' keyword
+# 'args' is a task keyword, passed at the same level as the module
+- name: Run command if /path/to/database does not exist (with 'args' keyword)
+ ansible.builtin.command: /usr/bin/make_database.sh db_user db_name
+ args:
+ creates: /path/to/database
+
+# 'cmd' is module parameter
+- name: Run command if /path/to/database does not exist (with 'cmd' parameter)
+ ansible.builtin.command:
+ cmd: /usr/bin/make_database.sh db_user db_name
+ creates: /path/to/database
+
+- name: Change the working directory to somedir/ and run the command as db_owner if /path/to/database does not exist
+ ansible.builtin.command: /usr/bin/make_database.sh db_user db_name
+ become: yes
+ become_user: db_owner
+ args:
+ chdir: somedir/
+ creates: /path/to/database
+
+# argv (list) arguments, each argument on a separate line, 'args' keyword not necessary
+# 'argv' is a parameter, indented one level from the module
+- name: Use 'argv' to send a command as a list - leave 'command' empty
+ ansible.builtin.command:
+ argv:
+ - /usr/bin/make_database.sh
+ - Username with whitespace
+ - dbname with whitespace
+ creates: /path/to/database
+
+- name: Safely use templated variable to run command. Always use the quote filter to avoid injection issues
+ ansible.builtin.command: cat {{ myfile|quote }}
+ register: myoutput
+'''
+
+RETURN = r'''
+msg:
+ description: Message describing the outcome of the module run, for example the failure reason.
+ returned: always
+ type: str
+ sample: 'non-zero return code'
+start:
+ description: The command execution start time.
+ returned: always
+ type: str
+ sample: '2017-09-29 22:03:48.083128'
+end:
+ description: The command execution end time.
+ returned: always
+ type: str
+ sample: '2017-09-29 22:03:48.084657'
+delta:
+ description: The command execution delta time.
+ returned: always
+ type: str
+ sample: '0:00:00.001529'
+stdout:
+ description: The command standard output.
+ returned: always
+ type: str
+ sample: 'Clustering node rabbit@slave1 with rabbit@master …'
+stderr:
+ description: The command standard error.
+ returned: always
+ type: str
+ sample: 'ls cannot access foo: No such file or directory'
+cmd:
+ description: The command executed by the task.
+ returned: always
+ type: list
+ sample:
+ - echo
+ - hello
+rc:
+ description: The command return code (0 means success).
+ returned: always
+ type: int
+ sample: 0
+stdout_lines:
+ description: The command standard output split in lines.
+ returned: always
+ type: list
+ sample: [u'Clustering node rabbit@slave1 with rabbit@master …']
+stderr_lines:
+ description: The command standard error split in lines.
+ returned: always
+ type: list
+ sample: [u'ls cannot access foo: No such file or directory', u'ls …']
+'''
+
+import datetime
+import glob
+import os
+import shlex
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native, to_bytes, to_text
+from ansible.module_utils.common.collections import is_iterable
+
+
+def main():
+
+ # the command module is the one ansible module that does not take key=value args
+ # hence don't copy this one if you are looking to build others!
+ # NOTE: ensure splitter.py is kept in sync for exceptions
+ module = AnsibleModule(
+ argument_spec=dict(
+ _raw_params=dict(),
+ _uses_shell=dict(type='bool', default=False),
+ argv=dict(type='list', elements='str'),
+ chdir=dict(type='path'),
+ executable=dict(),
+ creates=dict(type='path'),
+ removes=dict(type='path'),
+ # The default for this really comes from the action plugin
+ stdin=dict(required=False),
+ stdin_add_newline=dict(type='bool', default=True),
+ strip_empty_ends=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ shell = module.params['_uses_shell']
+ chdir = module.params['chdir']
+ executable = module.params['executable']
+ args = module.params['_raw_params']
+ argv = module.params['argv']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ stdin = module.params['stdin']
+ stdin_add_newline = module.params['stdin_add_newline']
+ strip = module.params['strip_empty_ends']
+
+ # we promised these in 'always' (_lines get auto-added by the action plugin)
+ r = {'changed': False, 'stdout': '', 'stderr': '', 'rc': None, 'cmd': None, 'start': None, 'end': None, 'delta': None, 'msg': ''}
+
+ if not shell and executable:
+ module.warn("As of Ansible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
+ executable = None
+
+ if (not args or args.strip() == '') and not argv:
+ r['rc'] = 256
+ r['msg'] = "no command given"
+ module.fail_json(**r)
+
+ if args and argv:
+ r['rc'] = 256
+ r['msg'] = "only command or argv can be given, not both"
+ module.fail_json(**r)
+
+ if not shell and args:
+ args = shlex.split(args)
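+ # shlex.split() honors shell-style quoting without invoking a shell; for
+ # example, shlex.split("cat '/tmp/my file'") yields ['cat', '/tmp/my file'].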
+
+ args = args or argv
+ # All args must be strings
+ if is_iterable(args, include_strings=False):
+ args = [to_native(arg, errors='surrogate_or_strict', nonstring='simplerepr') for arg in args]
+
+ r['cmd'] = args
+
+ if chdir:
+ chdir = to_bytes(chdir, errors='surrogate_or_strict')
+
+ try:
+ os.chdir(chdir)
+ except (IOError, OSError) as e:
+ r['msg'] = 'Unable to change directory before execution: %s' % to_text(e)
+ module.fail_json(**r)
+
+ # check_mode partial support, since it only really works in checking creates/removes
+ if module.check_mode:
+ shoulda = "Would"
+ else:
+ shoulda = "Did"
+
+ # special skips for idempotence if file exists (assumes command creates)
+ if creates:
+ if glob.glob(creates):
+ r['msg'] = "%s not run command since '%s' exists" % (shoulda, creates)
+ r['stdout'] = "skipped, since %s exists" % creates # TODO: deprecate
+
+ r['rc'] = 0
+
+ # special skips for idempotence if file does not exist (assumes command removes)
+ if not r['msg'] and removes:
+ if not glob.glob(removes):
+ r['msg'] = "%s not run command since '%s' does not exist" % (shoulda, removes)
+ r['stdout'] = "skipped, since %s does not exist" % removes # TODO: deprecate
+ r['rc'] = 0
+
+ if r['msg']:
+ module.exit_json(**r)
+
+ r['changed'] = True
+
+ # actually executes command (or not ...)
+ if not module.check_mode:
+ r['start'] = datetime.datetime.now()
+ r['rc'], r['stdout'], r['stderr'] = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None,
+ data=stdin, binary_data=(not stdin_add_newline))
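+ # With encoding=None, run_command() returns stdout/stderr as bytes; the
+ # strip_empty_ends branch below converts them to text.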
+ r['end'] = datetime.datetime.now()
+ else:
+ # this is partial check_mode support, since we end up skipping if we get here
+ r['rc'] = 0
+ r['msg'] = "Command would have run if not in check mode"
+ if creates is None and removes is None:
+ r['skipped'] = True
+ # skipped=True and changed=True are mutually exclusive
+ r['changed'] = False
+
+ # convert to text for jsonization and usability
+ if r['start'] is not None and r['end'] is not None:
+ # these are datetime objects, but need them as strings to pass back
+ r['delta'] = to_text(r['end'] - r['start'])
+ r['end'] = to_text(r['end'])
+ r['start'] = to_text(r['start'])
+
+ if strip:
+ r['stdout'] = to_text(r['stdout']).rstrip("\r\n")
+ r['stderr'] = to_text(r['stderr']).rstrip("\r\n")
+
+ if r['rc'] != 0:
+ r['msg'] = 'non-zero return code'
+ module.fail_json(**r)
+
+ module.exit_json(**r)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/copy.py b/lib/ansible/modules/copy.py
new file mode 100644
index 0000000..37115fa
--- /dev/null
+++ b/lib/ansible/modules/copy.py
@@ -0,0 +1,825 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: copy
+version_added: historical
+short_description: Copy files to remote locations
+description:
+ - The C(copy) module copies a file from the local or remote machine to a location on the remote machine.
+ - Use the M(ansible.builtin.fetch) module to copy files from remote locations to the local box.
+ - If you need variable interpolation in copied files, use the M(ansible.builtin.template) module.
+ Using a variable in the C(content) field will result in unpredictable output.
+ - For Windows targets, use the M(ansible.windows.win_copy) module instead.
+options:
+ src:
+ description:
+ - Local path to a file to copy to the remote server.
+ - This can be absolute or relative.
+ - If path is a directory, it is copied recursively. In this case, if path ends
+ with "/", only the contents of that directory are copied to destination.
+ Otherwise, if it does not end with "/", the directory itself with all contents
+ is copied. This behavior is similar to the C(rsync) command line tool.
+ type: path
+ content:
+ description:
+ - When used instead of C(src), sets the contents of a file directly to the specified value.
+ - Works only when C(dest) is a file. Creates the file if it does not exist.
+ - For advanced formatting or if C(content) contains a variable, use the
+ M(ansible.builtin.template) module.
+ type: str
+ version_added: '1.1'
+ dest:
+ description:
+ - Remote absolute path where the file should be copied to.
+ - If C(src) is a directory, this must be a directory too.
+ - If C(dest) is a non-existent path and if either C(dest) ends with "/" or C(src) is a directory, C(dest) is created.
+ - If I(dest) is a relative path, the starting directory is determined by the remote host.
+ - If C(src) and C(dest) are files, the parent directory of C(dest) is not created and the task fails if it does not already exist.
+ type: path
+ required: yes
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ version_added: '0.7'
+ force:
+ description:
+ - Influence whether the remote file must always be replaced.
+ - If C(true), the remote file will be replaced when contents are different than the source.
+ - If C(false), the file will only be transferred if the destination does not exist.
+ type: bool
+ default: yes
+ version_added: '1.1'
+ mode:
+ description:
+ - The permissions of the destination file or directory.
+ - For those used to C(/usr/bin/chmod) remember that modes are actually octal numbers.
+ You must either add a leading zero so that Ansible's YAML parser knows it is an octal number
+ (like C(0644) or C(01777)) or quote it (like C('644') or C('1777')) so Ansible receives a string
+ and can do its own conversion from string into number. Giving Ansible a number without following
+ one of these rules will end up with a decimal number which will have unexpected results.
+ - As of Ansible 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)).
+ - As of Ansible 2.3, the mode may also be the special string C(preserve).
+ - C(preserve) means that the file will be given the same permissions as the source file.
+ - When doing a recursive copy, see also C(directory_mode).
+ - If C(mode) is not specified and the destination file B(does not) exist, the default C(umask) on the system will be used
+ when setting the mode for the newly created file.
+ - If C(mode) is not specified and the destination file B(does) exist, the mode of the existing file will be used.
+ - Specifying C(mode) is the best way to ensure files are created with the correct permissions.
+ See CVE-2020-1736 for further details.
+ directory_mode:
+ description:
+ - When doing a recursive copy set the mode for the directories.
+ - If this is not set we will use the system defaults.
+ - The mode is only set on directories which are newly created, and will not affect those that already existed.
+ type: raw
+ version_added: '1.5'
+ remote_src:
+ description:
+ - Influence whether C(src) needs to be transferred or already is present remotely.
+ - If C(false), it will search for C(src) on the controller node.
+ - If C(true) it will search for C(src) on the managed (remote) node.
+ - C(remote_src) supports recursive copying as of version 2.8.
+ - C(remote_src) only works with C(mode=preserve) as of version 2.6.
+ - Autodecryption of files does not work when C(remote_src=yes).
+ type: bool
+ default: no
+ version_added: '2.0'
+ follow:
+ description:
+ - This flag indicates that filesystem links in the destination, if they exist, should be followed.
+ type: bool
+ default: no
+ version_added: '1.8'
+ local_follow:
+ description:
+ - This flag indicates that filesystem links in the source tree, if they exist, should be followed.
+ type: bool
+ default: yes
+ version_added: '2.4'
+ checksum:
+ description:
+ - SHA1 checksum of the file being transferred.
+ - Used to validate that the copy of the file was successful.
+ - If this is not provided, Ansible will use the locally calculated checksum of the src file.
+ type: str
+ version_added: '2.5'
+extends_documentation_fragment:
+ - decrypt
+ - files
+ - validate
+ - action_common_attributes
+ - action_common_attributes.files
+ - action_common_attributes.flow
+notes:
+ - The M(ansible.builtin.copy) module's recursive copy facility does not scale to lots (>hundreds) of files.
+seealso:
+ - module: ansible.builtin.assemble
+ - module: ansible.builtin.fetch
+ - module: ansible.builtin.file
+ - module: ansible.builtin.template
+ - module: ansible.posix.synchronize
+ - module: ansible.windows.win_copy
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: full
+ vault:
+ support: full
+ version_added: '2.2'
+'''
+
+EXAMPLES = r'''
+- name: Copy file with owner and permissions
+ ansible.builtin.copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: '0644'
+
+- name: Copy file with owner and permission, using symbolic representation
+ ansible.builtin.copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: u=rw,g=r,o=r
+
+- name: Another symbolic mode example, adding some permissions and removing others
+ ansible.builtin.copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: u+rw,g-wx,o-rwx
+
+- name: Copy a new "ntp.conf" file into place, backing up the original if it differs from the copied version
+ ansible.builtin.copy:
+ src: /mine/ntp.conf
+ dest: /etc/ntp.conf
+ owner: root
+ group: root
+ mode: '0644'
+ backup: yes
+
+- name: Copy a new "sudoers" file into place, after passing validation with visudo
+ ansible.builtin.copy:
+ src: /mine/sudoers
+ dest: /etc/sudoers
+ validate: /usr/sbin/visudo -csf %s
+
+- name: Copy a "sudoers" file on the remote machine for editing
+ ansible.builtin.copy:
+ src: /etc/sudoers
+ dest: /etc/sudoers.edit
+ remote_src: yes
+ validate: /usr/sbin/visudo -csf %s
+
+- name: Copy using inline content
+ ansible.builtin.copy:
+ content: '# This file was moved to /etc/other.conf'
+ dest: /etc/mine.conf
+
+- name: If follow=yes, /path/to/file will be overwritten by contents of foo.conf
+ ansible.builtin.copy:
+ src: /etc/foo.conf
+ dest: /path/to/link # link to /path/to/file
+ follow: yes
+
+- name: If follow=no, /path/to/link will become a file and be overwritten by contents of foo.conf
+ ansible.builtin.copy:
+ src: /etc/foo.conf
+ dest: /path/to/link # link to /path/to/file
+ follow: no
+'''
+
+RETURN = r'''
+dest:
+ description: Destination file/path.
+ returned: success
+ type: str
+ sample: /path/to/file.txt
+src:
+ description: Source file used for the copy on the target machine.
+ returned: changed
+ type: str
+ sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
+md5sum:
+ description: MD5 checksum of the file after running copy.
+ returned: when supported
+ type: str
+ sample: 2a5aeecc61dc98c4d780b14b330e3282
+checksum:
+ description: SHA1 checksum of the file after running copy.
+ returned: success
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+backup_file:
+ description: Name of backup file created.
+ returned: changed and if backup=yes
+ type: str
+ sample: /path/to/file.txt.2015-02-12@22:09~
+gid:
+ description: Group id of the file, after execution.
+ returned: success
+ type: int
+ sample: 100
+group:
+ description: Group of the file, after execution.
+ returned: success
+ type: str
+ sample: httpd
+owner:
+ description: Owner of the file, after execution.
+ returned: success
+ type: str
+ sample: httpd
+uid:
+ description: Owner id of the file, after execution.
+ returned: success
+ type: int
+ sample: 100
+mode:
+ description: Permissions of the target, after execution.
+ returned: success
+ type: str
+ sample: "0644"
+size:
+ description: Size of the target, after execution.
+ returned: success
+ type: int
+ sample: 1220
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: file
+'''
+
+import errno
+import filecmp
+import grp
+import os
+import os.path
+import platform
+import pwd
+import shutil
+import stat
+import tempfile
+import traceback
+
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.six import PY3
+
+
+# The AnsibleModule object
+module = None
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+
+# Once we get run_command moved into common, we can move this into a common/files module. We can't
+# until then because of the module.run_command() method. We may need to move it into
+# basic::AnsibleModule() until then but if so, make it a private function so that we don't have to
+# keep it for backwards compatibility later.
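+# Note: 'setfacl -b' removes all ACL entries from the given path. A missing
+# setfacl binary surfaces as a ValueError from get_bin_path(), which the caller
+# treats as "no ACLs could have been set by the controller".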
+def clear_facls(path):
+ setfacl = get_bin_path('setfacl')
+ # FIXME "setfacl -b" is available on Linux and FreeBSD. There is "setfacl -D e" on z/OS. Others?
+ acl_command = [setfacl, '-b', path]
+ b_acl_command = [to_bytes(x) for x in acl_command]
+ locale = get_best_parsable_locale(module)
+ rc, out, err = module.run_command(b_acl_command, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale))
+ if rc != 0:
+ raise RuntimeError('Error running "{0}": stdout: "{1}"; stderr: "{2}"'.format(' '.join(b_acl_command), out, err))
+
+
+def split_pre_existing_dir(dirname):
+ '''
+ Return the first pre-existing directory and a list of the new directories that will be created.
+ '''
+ head, tail = os.path.split(dirname)
+ b_head = to_bytes(head, errors='surrogate_or_strict')
+ if head == '':
+ return ('.', [tail])
+ if not os.path.exists(b_head):
+ if head == '/':
+ raise AnsibleModuleError(results={'msg': "The '/' directory doesn't exist on this machine."})
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
+ else:
+ return (head, [tail])
+ new_directory_list.append(tail)
+ return (pre_existing_dir, new_directory_list)
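+
+# Worked example (illustrative): with only /srv present on disk,
+# split_pre_existing_dir('/srv/app/conf') returns ('/srv', ['app', 'conf']).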
+
+
+def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
+ '''
+ Walk the new directories list and make sure that permissions are as we would expect
+ '''
+
+ if new_directory_list:
+ working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0))
+ directory_args['path'] = working_dir
+ changed = module.set_fs_attributes_if_different(directory_args, changed)
+ changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
+ return changed
+
+
+def chown_recursive(path, module):
+ changed = False
+ owner = module.params['owner']
+ group = module.params['group']
+
+ if owner is not None:
+ if not module.check_mode:
+ for dirpath, dirnames, filenames in os.walk(path):
+ owner_changed = module.set_owner_if_different(dirpath, owner, False)
+ if owner_changed is True:
+ changed = owner_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ owner_changed = module.set_owner_if_different(dir, owner, False)
+ if owner_changed is True:
+ changed = owner_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ owner_changed = module.set_owner_if_different(file, owner, False)
+ if owner_changed is True:
+ changed = owner_changed
+ else:
+ uid = pwd.getpwnam(owner).pw_uid
+ for dirpath, dirnames, filenames in os.walk(path):
+ owner_changed = (os.stat(dirpath).st_uid != uid)
+ if owner_changed is True:
+ changed = owner_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ owner_changed = (os.stat(dir).st_uid != uid)
+ if owner_changed is True:
+ changed = owner_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ owner_changed = (os.stat(file).st_uid != uid)
+ if owner_changed is True:
+ changed = owner_changed
+ if group is not None:
+ if not module.check_mode:
+ for dirpath, dirnames, filenames in os.walk(path):
+ group_changed = module.set_group_if_different(dirpath, group, False)
+ if group_changed is True:
+ changed = group_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ group_changed = module.set_group_if_different(dir, group, False)
+ if group_changed is True:
+ changed = group_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ group_changed = module.set_group_if_different(file, group, False)
+ if group_changed is True:
+ changed = group_changed
+ else:
+ gid = grp.getgrnam(group).gr_gid
+ for dirpath, dirnames, filenames in os.walk(path):
+ group_changed = (os.stat(dirpath).st_gid != gid)
+ if group_changed is True:
+ changed = group_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ group_changed = (os.stat(dir).st_gid != gid)
+ if group_changed is True:
+ changed = group_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ group_changed = (os.stat(file).st_gid != gid)
+ if group_changed is True:
+ changed = group_changed
+
+ return changed
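+
+# Note: in check mode the os.stat() walk above only detects uid/gid differences;
+# outside check mode, set_owner_if_different()/set_group_if_different() actually
+# apply the requested ownership.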
+
+
+def copy_diff_files(src, dest, module):
+ """Copy files that are different between `src` directory and `dest` directory."""
+
+ changed = False
+ owner = module.params['owner']
+ group = module.params['group']
+ local_follow = module.params['local_follow']
+ diff_files = filecmp.dircmp(src, dest).diff_files
+ if len(diff_files):
+ changed = True
+ if not module.check_mode:
+ for item in diff_files:
+ src_item_path = os.path.join(src, item)
+ dest_item_path = os.path.join(dest, item)
+ b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
+ b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
+ if os.path.islink(b_src_item_path) and local_follow is False:
+ linkto = os.readlink(b_src_item_path)
+ os.symlink(linkto, b_dest_item_path)
+ else:
+ shutil.copyfile(b_src_item_path, b_dest_item_path)
+ shutil.copymode(b_src_item_path, b_dest_item_path)
+
+ if owner is not None:
+ module.set_owner_if_different(b_dest_item_path, owner, False)
+ if group is not None:
+ module.set_group_if_different(b_dest_item_path, group, False)
+ changed = True
+ return changed
+
+
+def copy_left_only(src, dest, module):
+ """Copy files that exist in `src` directory only to the `dest` directory."""
+
+ changed = False
+ owner = module.params['owner']
+ group = module.params['group']
+ local_follow = module.params['local_follow']
+ left_only = filecmp.dircmp(src, dest).left_only
+ if len(left_only):
+ changed = True
+ if not module.check_mode:
+ for item in left_only:
+ src_item_path = os.path.join(src, item)
+ dest_item_path = os.path.join(dest, item)
+ b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
+ b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
+
+ if os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path) and local_follow is True:
+ shutil.copytree(b_src_item_path, b_dest_item_path, symlinks=not local_follow)
+ chown_recursive(b_dest_item_path, module)
+
+ if os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path) and local_follow is False:
+ linkto = os.readlink(b_src_item_path)
+ os.symlink(linkto, b_dest_item_path)
+
+ if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is True:
+ shutil.copyfile(b_src_item_path, b_dest_item_path)
+ if owner is not None:
+ module.set_owner_if_different(b_dest_item_path, owner, False)
+ if group is not None:
+ module.set_group_if_different(b_dest_item_path, group, False)
+
+ if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is False:
+ linkto = os.readlink(b_src_item_path)
+ os.symlink(linkto, b_dest_item_path)
+
+ if not os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path):
+ shutil.copyfile(b_src_item_path, b_dest_item_path)
+ shutil.copymode(b_src_item_path, b_dest_item_path)
+
+ if owner is not None:
+ module.set_owner_if_different(b_dest_item_path, owner, False)
+ if group is not None:
+ module.set_group_if_different(b_dest_item_path, group, False)
+
+ if not os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path):
+ shutil.copytree(b_src_item_path, b_dest_item_path, symlinks=not local_follow)
+ chown_recursive(b_dest_item_path, module)
+
+ changed = True
+ return changed
+
+
+def copy_common_dirs(src, dest, module):
+ changed = False
+ common_dirs = filecmp.dircmp(src, dest).common_dirs
+ for item in common_dirs:
+ src_item_path = os.path.join(src, item)
+ dest_item_path = os.path.join(dest, item)
+ b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
+ b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
+ diff_files_changed = copy_diff_files(b_src_item_path, b_dest_item_path, module)
+ left_only_changed = copy_left_only(b_src_item_path, b_dest_item_path, module)
+ if diff_files_changed or left_only_changed:
+ changed = True
+
+ # recurse into subdirectory
+ changed = changed or copy_common_dirs(os.path.join(src, item), os.path.join(dest, item), module)
+ return changed
+
+
+def main():
+
+ global module
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ src=dict(type='path'),
+ _original_basename=dict(type='str'), # used to handle 'dest is a directory' via template, a slight hack
+ content=dict(type='str', no_log=True),
+ dest=dict(type='path', required=True),
+ backup=dict(type='bool', default=False),
+ force=dict(type='bool', default=True),
+ validate=dict(type='str'),
+ directory_mode=dict(type='raw'),
+ remote_src=dict(type='bool'),
+ local_follow=dict(type='bool'),
+ checksum=dict(type='str'),
+ follow=dict(type='bool', default=False),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ src = module.params['src']
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ dest = module.params['dest']
+ # Make sure we always have a directory component for later processing
+ if os.path.sep not in dest:
+ dest = '.{0}{1}'.format(os.path.sep, dest)
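+ # (this guarantees os.path.dirname(dest) is never the empty string later on)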
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ backup = module.params['backup']
+ force = module.params['force']
+ _original_basename = module.params.get('_original_basename', None)
+ validate = module.params.get('validate', None)
+ follow = module.params['follow']
+ local_follow = module.params['local_follow']
+ mode = module.params['mode']
+ owner = module.params['owner']
+ group = module.params['group']
+ remote_src = module.params['remote_src']
+ checksum = module.params['checksum']
+
+ if not os.path.exists(b_src):
+ module.fail_json(msg="Source %s not found" % (src))
+ if not os.access(b_src, os.R_OK):
+ module.fail_json(msg="Source %s not readable" % (src))
+
+ # Preserve is usually handled in the action plugin but mode + remote_src has to be done on the
+ # remote host
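+ # stat.S_IMODE() masks off the file-type bits of st_mode, leaving only the
+ # permission bits (for example 0o100644 becomes 0o644, formatted as '0644').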
+ if module.params['mode'] == 'preserve':
+ module.params['mode'] = '0%03o' % stat.S_IMODE(os.stat(b_src).st_mode)
+ mode = module.params['mode']
+
+ changed = False
+
+ checksum_dest = None
+ checksum_src = None
+ md5sum_src = None
+
+ if os.path.isfile(src):
+ try:
+ checksum_src = module.sha1(src)
+ except (OSError, IOError) as e:
+ module.warn("Unable to calculate src checksum, assuming change: %s" % to_native(e))
+ try:
+ # Backwards compat only. This will be None in FIPS mode
+ md5sum_src = module.md5(src)
+ except ValueError:
+ pass
+ elif remote_src and not os.path.isdir(src):
+ module.fail_json("Cannot copy invalid source '%s': not a file" % to_native(src))
+
+ if checksum and checksum_src != checksum:
+ module.fail_json(
+ msg='Copied file does not match the expected checksum. Transfer failed.',
+ checksum=checksum_src,
+ expected_checksum=checksum
+ )
+
+ # Special handling for recursive copy - create intermediate dirs
+ if dest.endswith(os.sep):
+ if _original_basename:
+ dest = os.path.join(dest, _original_basename)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ dirname = os.path.dirname(dest)
+ b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
+ if not os.path.exists(b_dirname):
+ try:
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
+ except AnsibleModuleError as e:
+ e.results['msg'] += ' Could not copy to {0}'.format(dest)
+ module.fail_json(**e.results)
+
+ os.makedirs(b_dirname)
+ directory_args = module.load_file_common_arguments(module.params)
+ directory_mode = module.params["directory_mode"]
+ if directory_mode is not None:
+ directory_args['mode'] = directory_mode
+ else:
+ directory_args['mode'] = None
+ adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
+
+ if os.path.isdir(b_dest):
+ basename = os.path.basename(src)
+ if _original_basename:
+ basename = _original_basename
+ dest = os.path.join(dest, basename)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+
+ if os.path.exists(b_dest):
+ if os.path.islink(b_dest) and follow:
+ b_dest = os.path.realpath(b_dest)
+ dest = to_native(b_dest, errors='surrogate_or_strict')
+ if not force:
+ module.exit_json(msg="file already exists", src=src, dest=dest, changed=False)
+ if os.access(b_dest, os.R_OK) and os.path.isfile(b_dest):
+ checksum_dest = module.sha1(dest)
+ else:
+ if not os.path.exists(os.path.dirname(b_dest)):
+ try:
+ # os.path.exists() can return false in some
+ # circumstances where the directory does not have
+ # the execute bit for the current user set, in
+ # which case the stat() call will raise an OSError
+ os.stat(os.path.dirname(b_dest))
+ except OSError as e:
+ if "permission denied" in to_native(e).lower():
+ module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest)))
+ module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest)))
+
+ if not os.access(os.path.dirname(b_dest), os.W_OK) and not module.params['unsafe_writes']:
+ module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest)))
+
+ backup_file = None
+ if checksum_src != checksum_dest or os.path.islink(b_dest):
+
+ if not module.check_mode:
+ try:
+ if backup:
+ if os.path.exists(b_dest):
+ backup_file = module.backup_local(dest)
+ # allow for conversion from symlink.
+ if os.path.islink(b_dest):
+ os.unlink(b_dest)
+ open(b_dest, 'w').close()
+ if validate:
+ # if we have a mode, make sure we set it on the temporary
+ # file source as some validations may require it
+ if mode is not None:
+ module.set_mode_if_different(src, mode, False)
+ if owner is not None:
+ module.set_owner_if_different(src, owner, False)
+ if group is not None:
+ module.set_group_if_different(src, group, False)
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(validate % src)
+ if rc != 0:
+ module.fail_json(msg="failed to validate", exit_status=rc, stdout=out, stderr=err)
+
+ b_mysrc = b_src
+ if remote_src and os.path.isfile(b_src):
+
+ _, b_mysrc = tempfile.mkstemp(dir=os.path.dirname(b_dest))
+
+ shutil.copyfile(b_src, b_mysrc)
+ try:
+ shutil.copystat(b_src, b_mysrc)
+ except OSError as err:
+ if err.errno == errno.ENOSYS and mode == "preserve":
+ module.warn("Unable to copy stats {0}".format(to_native(b_src)))
+ else:
+ raise
+
+ # might be needed below
+ if PY3 and hasattr(os, 'listxattr'):
+ try:
+ src_has_acls = 'system.posix_acl_access' in os.listxattr(src)
+ except Exception as e:
+ # assume unwanted ACLs by default
+ src_has_acls = True
+
+ # at this point we should always have tmp file
+ module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'])
+
+ if PY3 and hasattr(os, 'listxattr') and platform.system() == 'Linux' and not remote_src:
+ # atomic_move used above to copy src into dest might, in some cases,
+ # use shutil.copy2 which in turn uses shutil.copystat.
+ # Since Python 3.3, shutil.copystat copies file extended attributes:
+ # https://docs.python.org/3/library/shutil.html#shutil.copystat
+ # os.listxattr (along with others) was added to handle the operation.
+
+ # This means that on Python 3 we are copying the extended attributes which includes
+ # the ACLs on some systems - further limited to Linux as the documentation above claims
+ # that the extended attributes are copied only on Linux. Also, os.listxattr is only
+ # available on Linux.
+
+ # If not remote_src, then the file was copied from the controller. In that
+ # case, any filesystem ACLs are artifacts of the copy rather than preservation
+ # of existing attributes. Get rid of them:
+
+ if src_has_acls:
+ # FIXME If dest has any default ACLs, they are not applied to src now because
+ # they were overridden by copystat. Should/can we do anything about this?
+ # 'system.posix_acl_default' in os.listxattr(os.path.dirname(b_dest))
+
+ try:
+ clear_facls(dest)
+ except ValueError as e:
+ if 'setfacl' in to_native(e):
+ # No setfacl so we're okay. The controller couldn't have set a facl
+ # without the setfacl command
+ pass
+ else:
+ raise
+ except RuntimeError as e:
+ # setfacl failed.
+ if 'Operation not supported' in to_native(e):
+ # The file system does not support ACLs.
+ pass
+ else:
+ raise
+
+ except (IOError, OSError):
+ module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc())
+ changed = True
+ else:
+ changed = False
+
+ # If neither have checksums, both src and dest are directories.
+ if checksum_src is None and checksum_dest is None:
+ if remote_src and os.path.isdir(module.params['src']):
+ b_src = to_bytes(module.params['src'], errors='surrogate_or_strict')
+ b_dest = to_bytes(module.params['dest'], errors='surrogate_or_strict')
+
+ if src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
+ diff_files_changed = copy_diff_files(b_src, b_dest, module)
+ left_only_changed = copy_left_only(b_src, b_dest, module)
+ common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
+ owner_group_changed = chown_recursive(b_dest, module)
+ if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
+ changed = True
+
+ if src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
+ b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
+ b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
+ b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
+ if not module.check_mode:
+ shutil.copytree(b_src, b_dest, symlinks=not local_follow)
+ chown_recursive(dest, module)
+ changed = True
+
+ if not src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
+ b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
+ b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
+ b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
+ if not module.check_mode and not os.path.exists(b_dest):
+ shutil.copytree(b_src, b_dest, symlinks=not local_follow)
+ changed = True
+ chown_recursive(dest, module)
+ if module.check_mode and not os.path.exists(b_dest):
+ changed = True
+ if os.path.exists(b_dest):
+ diff_files_changed = copy_diff_files(b_src, b_dest, module)
+ left_only_changed = copy_left_only(b_src, b_dest, module)
+ common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
+ owner_group_changed = chown_recursive(b_dest, module)
+ if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
+ changed = True
+
+ if not src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
+ b_basename = to_bytes(os.path.basename(module.params['src']), errors='surrogate_or_strict')
+ b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
+ if not module.check_mode and not os.path.exists(b_dest):
+ os.makedirs(b_dest)
+ b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
+ diff_files_changed = copy_diff_files(b_src, b_dest, module)
+ left_only_changed = copy_left_only(b_src, b_dest, module)
+ common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
+ owner_group_changed = chown_recursive(b_dest, module)
+ if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
+ changed = True
+ if module.check_mode and not os.path.exists(b_dest):
+ changed = True
+
+ res_args = dict(
+ dest=dest, src=src, md5sum=md5sum_src, checksum=checksum_src, changed=changed
+ )
+ if backup_file:
+ res_args['backup_file'] = backup_file
+
+ if not module.check_mode:
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cron.py b/lib/ansible/modules/cron.py
new file mode 100644
index 0000000..9b4c96c
--- /dev/null
+++ b/lib/ansible/modules/cron.py
@@ -0,0 +1,765 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dane Summers <dsummers@pinedesk.biz>
+# Copyright: (c) 2013, Mike Grozak <mike.grozak@gmail.com>
+# Copyright: (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
+# Copyright: (c) 2015, Evan Kaufman <evan@digitalflophouse.com>
+# Copyright: (c) 2015, Luca Berruti <nadirio@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: cron
+short_description: Manage cron.d and crontab entries
+description:
+ - Use this module to manage crontab and environment variable entries. This module allows
+ you to create, update, or delete environment variables and named crontab entries.
+ - 'When crontab jobs are managed: the module includes one line with the description of the
+ crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module,
+ which is used by future ansible/module calls to find/check the state. The "name"
+ parameter should be unique, and changing the "name" value will result in a new cron
+ task being created (or a different one being removed).'
+ - When environment variables are managed, no comment line is added, but, when the module
+ needs to find/check the state, it uses the "name" parameter to find the environment
+ variable definition line.
+ - When using symbols such as %, they must be properly escaped.
+version_added: "0.9"
+options:
+ name:
+ description:
+ - Description of a crontab entry or, if env is set, the name of environment variable.
+ - This parameter is always required as of ansible-core 2.12.
+ type: str
+ required: yes
+ user:
+ description:
+ - The specific user whose crontab should be modified.
+ - When unset, this parameter defaults to the current user.
+ type: str
+ job:
+ description:
+ - The command to execute or, if env is set, the value of environment variable.
+ - The command should not contain line breaks.
+ - Required if I(state=present).
+ type: str
+ aliases: [ value ]
+ state:
+ description:
+ - Whether to ensure the job or environment variable is present or absent.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ cron_file:
+ description:
+ - If specified, uses this file instead of an individual user's crontab.
+ The assumption is that this file is exclusively managed by the module,
+ do not use if the file contains multiple entries, NEVER use for /etc/crontab.
+ - If this is a relative path, it is interpreted with respect to I(/etc/cron.d).
+ - Many Linux distros expect (and some require) the filename portion to consist solely
+ of upper- and lower-case letters, digits, underscores, and hyphens.
+ - Using this parameter requires you to specify the I(user) as well, unless I(state) is not I(present).
+ - Either this parameter or I(name) is required.
+ type: path
+ backup:
+ description:
+ - If set, create a backup of the crontab before it is modified.
+ The location of the backup is returned in the C(backup_file) variable by this module.
+ type: bool
+ default: no
+ minute:
+ description:
+ - Minute when the job should run (C(0-59), C(*), C(*/2), and so on).
+ type: str
+ default: "*"
+ hour:
+ description:
+ - Hour when the job should run (C(0-23), C(*), C(*/2), and so on).
+ type: str
+ default: "*"
+ day:
+ description:
+ - Day of the month the job should run (C(1-31), C(*), C(*/2), and so on).
+ type: str
+ default: "*"
+ aliases: [ dom ]
+ month:
+ description:
+ - Month of the year the job should run (C(1-12), C(*), C(*/2), and so on).
+ type: str
+ default: "*"
+ weekday:
+ description:
+ - Day of the week that the job should run (C(0-6) for Sunday-Saturday, C(*), and so on).
+ type: str
+ default: "*"
+ aliases: [ dow ]
+ special_time:
+ description:
+ - Special time specification nickname.
+ type: str
+ choices: [ annually, daily, hourly, monthly, reboot, weekly, yearly ]
+ version_added: "1.3"
+ disabled:
+ description:
+ - If the job should be disabled (commented out) in the crontab.
+ - Only has effect if I(state=present).
+ type: bool
+ default: no
+ version_added: "2.0"
+ env:
+ description:
+ - If set, manages a crontab's environment variable.
+ - New variables are added on top of crontab.
+ - I(name) and I(value) parameters are the name and the value of environment variable.
+ type: bool
+ default: false
+ version_added: "2.1"
+ insertafter:
+ description:
+ - Used with I(state=present) and I(env).
+ - If specified, the environment variable will be inserted after the declaration of specified environment variable.
+ type: str
+ version_added: "2.1"
+ insertbefore:
+ description:
+ - Used with I(state=present) and I(env).
+ - If specified, the environment variable will be inserted before the declaration of specified environment variable.
+ type: str
+ version_added: "2.1"
+requirements:
+ - cron (any 'vixie cron' conformant variant, like cronie)
+author:
+ - Dane Summers (@dsummersl)
+ - Mike Grozak (@rhaido)
+ - Patrick Callahan (@dirtyharrycallahan)
+ - Evan Kaufman (@EvanK)
+ - Luca Berruti (@lberruti)
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ support: full
+ platforms: posix
+'''
+
+EXAMPLES = r'''
+- name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * ls -alh > /dev/null"
+ ansible.builtin.cron:
+ name: "check dirs"
+ minute: "0"
+ hour: "5,2"
+ job: "ls -alh > /dev/null"
+
+- name: 'Ensure an old job is no longer present. Removes any job that is prefixed by "#Ansible: an old job" from the crontab'
+ ansible.builtin.cron:
+ name: "an old job"
+ state: absent
+
+- name: Creates an entry like "@reboot /some/job.sh"
+ ansible.builtin.cron:
+ name: "a job for reboot"
+ special_time: reboot
+ job: "/some/job.sh"
+
+- name: Creates an entry like "PATH=/opt/bin" on top of crontab
+ ansible.builtin.cron:
+ name: PATH
+ env: yes
+ job: /opt/bin
+
+- name: Creates an entry like "APP_HOME=/srv/app" and insert it after PATH declaration
+ ansible.builtin.cron:
+ name: APP_HOME
+ env: yes
+ job: /srv/app
+ insertafter: PATH
+
+- name: Creates a cron file under /etc/cron.d
+ ansible.builtin.cron:
+ name: yum autoupdate
+ weekday: "2"
+ minute: "0"
+ hour: "12"
+ user: root
+ job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
+ cron_file: ansible_yum-autoupdate
+
+- name: Removes a cron file from under /etc/cron.d
+ ansible.builtin.cron:
+ name: "yum autoupdate"
+ cron_file: ansible_yum-autoupdate
+ state: absent
+
+- name: Removes "APP_HOME" environment variable from crontab
+ ansible.builtin.cron:
+ name: APP_HOME
+ env: yes
+ state: absent
+'''
+
+RETURN = r'''#'''
+
+import os
+import platform
+import pwd
+import re
+import sys
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class CronTabError(Exception):
+ pass
+
+
+class CronTab(object):
+ """
+ CronTab object to write time based crontab file
+
+ user - the user of the crontab (defaults to current user)
+ cron_file - a cron file under /etc/cron.d, or an absolute path
+ """
+
+ def __init__(self, module, user=None, cron_file=None):
+ self.module = module
+ self.user = user
+ self.root = (os.getuid() == 0)
+ self.lines = None
+ self.ansible = "#Ansible: "
+ self.n_existing = ''
+ self.cron_cmd = self.module.get_bin_path('crontab', required=True)
+
+ if cron_file:
+
+ if os.path.isabs(cron_file):
+ self.cron_file = cron_file
+ self.b_cron_file = to_bytes(cron_file, errors='surrogate_or_strict')
+ else:
+ self.cron_file = os.path.join('/etc/cron.d', cron_file)
+ self.b_cron_file = os.path.join(b'/etc/cron.d', to_bytes(cron_file, errors='surrogate_or_strict'))
+ else:
+ self.cron_file = None
+
+ self.read()
+
+ def read(self):
+ # Read in the crontab from the system
+ self.lines = []
+ if self.cron_file:
+ # read the cronfile
+ try:
+ f = open(self.b_cron_file, 'rb')
+ self.n_existing = to_native(f.read(), errors='surrogate_or_strict')
+ self.lines = self.n_existing.splitlines()
+ f.close()
+ except IOError:
+ # cron file does not exist
+ return
+ except Exception:
+ raise CronTabError("Unexpected error:", sys.exc_info()[0])
+ else:
+ # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
+ (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
+
+ if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
+ raise CronTabError("Unable to read crontab")
+
+ self.n_existing = out
+
+ lines = out.splitlines()
+ count = 0
+ for l in lines:
+ if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
+ not re.match(r'# \(/tmp/.*installed on.*\)', l) and
+ not re.match(r'# \(.*version.*\)', l)):
+ self.lines.append(l)
+ else:
+ pattern = re.escape(l) + '[\r\n]?'
+ self.n_existing = re.sub(pattern, '', self.n_existing, 1)
+ count += 1
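+
+ # The three header lines matched above are what Vixie-cron's 'crontab -l'
+ # prepends to its output; they are stripped here so they are not duplicated
+ # when the crontab is written back.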
+
+ def is_empty(self):
+ if len(self.lines) == 0:
+ return True
+ else:
+ for line in self.lines:
+ if line.strip():
+ return False
+ return True
+
+ def write(self, backup_file=None):
+ """
+ Write the crontab to the system. Saves all information.
+ """
+ if backup_file:
+ fileh = open(backup_file, 'wb')
+ elif self.cron_file:
+ fileh = open(self.b_cron_file, 'wb')
+ else:
+ filed, path = tempfile.mkstemp(prefix='crontab')
+ os.chmod(path, int('0644', 8))
+ fileh = os.fdopen(filed, 'wb')
+
+ fileh.write(to_bytes(self.render()))
+ fileh.close()
+
+ # return if making a backup
+ if backup_file:
+ return
+
+ # Add the entire crontab back to the user crontab
+ if not self.cron_file:
+ # quoting shell args for now but really this should be two non-shell calls. FIXME
+ (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
+ os.unlink(path)
+
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ # set SELinux permissions
+ if self.module.selinux_enabled() and self.cron_file:
+ self.module.set_default_selinux_context(self.cron_file, False)
+
+ def do_comment(self, name):
+ return "%s%s" % (self.ansible, name)
+
+ def add_job(self, name, job):
+ # Add the comment
+ self.lines.append(self.do_comment(name))
+
+ # Add the job
+ self.lines.append("%s" % (job))
+
+ def update_job(self, name, job):
+ return self._update_job(name, job, self.do_add_job)
+
+ def do_add_job(self, lines, comment, job):
+ lines.append(comment)
+
+ lines.append("%s" % (job))
+
+ def remove_job(self, name):
+ return self._update_job(name, "", self.do_remove_job)
+
+ def do_remove_job(self, lines, comment, job):
+ return None
+
+ def add_env(self, decl, insertafter=None, insertbefore=None):
+ if not (insertafter or insertbefore):
+ self.lines.insert(0, decl)
+ return
+
+ if insertafter:
+ other_name = insertafter
+ elif insertbefore:
+ other_name = insertbefore
+ other_decl = self.find_env(other_name)
+ if len(other_decl) > 0:
+ if insertafter:
+ index = other_decl[0] + 1
+ elif insertbefore:
+ index = other_decl[0]
+ self.lines.insert(index, decl)
+ return
+
+ self.module.fail_json(msg="Variable named '%s' not found." % other_name)
+
+ def update_env(self, name, decl):
+ return self._update_env(name, decl, self.do_add_env)
+
+ def do_add_env(self, lines, decl):
+ lines.append(decl)
+
+ def remove_env(self, name):
+ return self._update_env(name, '', self.do_remove_env)
+
+ def do_remove_env(self, lines, decl):
+ return None
+
+ def remove_job_file(self):
+ try:
+ os.unlink(self.cron_file)
+ return True
+ except OSError:
+ # cron file does not exist
+ return False
+ except Exception:
+ raise CronTabError("Unexpected error:", sys.exc_info()[0])
+
+ def find_job(self, name, job=None):
+ # attempt to find job by 'Ansible:' header comment
+ comment = None
+ for l in self.lines:
+ if comment is not None:
+ if comment == name:
+ return [comment, l]
+ else:
+ comment = None
+ elif re.match(r'%s' % self.ansible, l):
+ comment = re.sub(r'%s' % self.ansible, '', l)
+
+ # failing that, attempt to find job by exact match
+ if job:
+ for i, l in enumerate(self.lines):
+ if l == job:
+ # if no leading ansible header, insert one
+ if not re.match(r'%s' % self.ansible, self.lines[i - 1]):
+ self.lines.insert(i, self.do_comment(name))
+ return [self.lines[i], l, True]
+ # if a leading blank ansible header AND job has a name, update header
+ elif name and self.lines[i - 1] == self.do_comment(None):
+ self.lines[i - 1] = self.do_comment(name)
+ return [self.lines[i - 1], l, True]
+
+ return []
+
+ def find_env(self, name):
+ for index, l in enumerate(self.lines):
+ if re.match(r'^%s=' % name, l):
+ return [index, l]
+
+ return []
+
+ def get_cron_job(self, minute, hour, day, month, weekday, job, special, disabled):
+ # normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
+ job = job.strip('\r\n')
+
+ if disabled:
+ disable_prefix = '#'
+ else:
+ disable_prefix = ''
+
+ if special:
+ if self.cron_file:
+ return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
+ else:
+ return "%s@%s %s" % (disable_prefix, special, job)
+ else:
+ if self.cron_file:
+ return "%s%s %s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, self.user, job)
+ else:
+ return "%s%s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, job)
+
+ def get_jobnames(self):
+ jobnames = []
+
+ for l in self.lines:
+ if re.match(r'%s' % self.ansible, l):
+ jobnames.append(re.sub(r'%s' % self.ansible, '', l))
+
+ return jobnames
+
+ def get_envnames(self):
+ envnames = []
+
+ for l in self.lines:
+ if re.match(r'^\S+=', l):
+ envnames.append(l.split('=')[0])
+
+ return envnames
+
+ def _update_job(self, name, job, addlinesfunction):
+ ansiblename = self.do_comment(name)
+ newlines = []
+ comment = None
+
+ for l in self.lines:
+ if comment is not None:
+ addlinesfunction(newlines, comment, job)
+ comment = None
+ elif l == ansiblename:
+ comment = l
+ else:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ if len(newlines) == 0:
+ return True
+ else:
+ return False # TODO add some more error testing
+
+ def _update_env(self, name, decl, addenvfunction):
+ newlines = []
+
+ for l in self.lines:
+ if re.match(r'^%s=' % name, l):
+ addenvfunction(newlines, decl)
+ else:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def render(self):
+ """
+ Render this crontab as it would be in the crontab.
+ """
+ result = '\n'.join(self.lines)
+ if result:
+ result = result.rstrip('\r\n') + '\n'
+ return result
+
+ def _read_user_execute(self):
+ """
+ Returns the command line for reading a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() == 'SunOS':
+ return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
+ elif platform.system() == 'AIX':
+ return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
+ elif platform.system() == 'HP-UX':
+ return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, '-l')
+
+ def _write_execute(self, path):
+ """
+ Return the command line for writing a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
+ return "chown %s %s ; su '%s' -c '%s %s'" % (
+ shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
+
+
+def main():
+ # The following example playbooks:
+ #
+ # - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
+ #
+ # - name: do the job
+ # cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
+ #
+ # - name: no job
+ # cron: name="an old job" state=absent
+ #
+ # - name: sets env
+ # cron: name="PATH" env=yes value="/bin:/usr/bin"
+ #
+ # Would produce:
+ # PATH=/bin:/usr/bin
+ # # Ansible: check dirs
+ # * * 5,2 * * ls -alh > /dev/null
+ # # Ansible: do the job
+ # * * 5,2 * * /some/dir/job.sh
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ user=dict(type='str'),
+ job=dict(type='str', aliases=['value']),
+ cron_file=dict(type='path'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ backup=dict(type='bool', default=False),
+ minute=dict(type='str', default='*'),
+ hour=dict(type='str', default='*'),
+ day=dict(type='str', default='*', aliases=['dom']),
+ month=dict(type='str', default='*'),
+ weekday=dict(type='str', default='*', aliases=['dow']),
+ special_time=dict(type='str', choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"]),
+ disabled=dict(type='bool', default=False),
+ env=dict(type='bool', default=False),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['insertafter', 'insertbefore'],
+ ],
+ )
+
+ name = module.params['name']
+ user = module.params['user']
+ job = module.params['job']
+ cron_file = module.params['cron_file']
+ state = module.params['state']
+ backup = module.params['backup']
+ minute = module.params['minute']
+ hour = module.params['hour']
+ day = module.params['day']
+ month = module.params['month']
+ weekday = module.params['weekday']
+ special_time = module.params['special_time']
+ disabled = module.params['disabled']
+ env = module.params['env']
+ insertafter = module.params['insertafter']
+ insertbefore = module.params['insertbefore']
+ do_install = state == 'present'
+
+ changed = False
+ res_args = dict()
+ warnings = list()
+
+ if cron_file:
+
+ if cron_file == '/etc/crontab':
+ module.fail_json(msg="Will not manage /etc/crontab via cron_file, see documentation.")
+
+ cron_file_basename = os.path.basename(cron_file)
+ if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
+ warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
+ ' solely of upper- and lower-case letters, digits, underscores, and hyphens')
+
+ # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
+ os.umask(int('022', 8))
+ crontab = CronTab(module, user, cron_file)
+
+ module.debug('cron instantiated - name: "%s"' % name)
+
+ if module._diff:
+ diff = dict()
+ diff['before'] = crontab.n_existing
+ if crontab.cron_file:
+ diff['before_header'] = crontab.cron_file
+ else:
+ if crontab.user:
+ diff['before_header'] = 'crontab for user "%s"' % crontab.user
+ else:
+ diff['before_header'] = 'crontab'
+
+ # --- user input validation ---
+
+    if special_time and \
+            any(x != '*' for x in (minute, hour, day, month, weekday)):
+ module.fail_json(msg="You must specify time and date fields or special time.")
+
+ # cannot support special_time on solaris
+ if special_time and platform.system() == 'SunOS':
+ module.fail_json(msg="Solaris does not support special_time=... or @reboot")
+
+ if do_install:
+ if cron_file and not user:
+            module.fail_json(msg="To use the cron_file=... parameter you must also specify user=...")
+
+ if job is None:
+ module.fail_json(msg="You must specify 'job' to install a new cron job or variable")
+
+ if (insertafter or insertbefore) and not env:
+ module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")
+
+ # if requested make a backup before making a change
+ if backup and not module.check_mode:
+ (backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
+ crontab.write(backup_file)
+
+ if env:
+ if ' ' in name:
+ module.fail_json(msg="Invalid name for environment variable")
+ decl = '%s="%s"' % (name, job)
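+        # e.g. name='PATH', job='/bin:/usr/bin' renders decl='PATH="/bin:/usr/bin"'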
+ old_decl = crontab.find_env(name)
+
+ if do_install:
+ if len(old_decl) == 0:
+ crontab.add_env(decl, insertafter, insertbefore)
+ changed = True
+ if len(old_decl) > 0 and old_decl[1] != decl:
+ crontab.update_env(name, decl)
+ changed = True
+ else:
+ if len(old_decl) > 0:
+ crontab.remove_env(name)
+ changed = True
+ else:
+ if do_install:
+ for char in ['\r', '\n']:
+ if char in job.strip('\r\n'):
+ warnings.append('Job should not contain line breaks')
+ break
+
+ job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
+ old_job = crontab.find_job(name, job)
+
+ if len(old_job) == 0:
+ crontab.add_job(name, job)
+ changed = True
+ if len(old_job) > 0 and old_job[1] != job:
+ crontab.update_job(name, job)
+ changed = True
+ if len(old_job) > 2:
+ crontab.update_job(name, job)
+ changed = True
+ else:
+ old_job = crontab.find_job(name)
+
+ if len(old_job) > 0:
+ crontab.remove_job(name)
+ changed = True
+ if crontab.cron_file and crontab.is_empty():
+ if module._diff:
+ diff['after'] = ''
+ diff['after_header'] = '/dev/null'
+ else:
+ diff = dict()
+ if module.check_mode:
+ changed = os.path.isfile(crontab.cron_file)
+ else:
+ changed = crontab.remove_job_file()
+ module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)
+
+ # no changes to env/job, but existing crontab needs a terminating newline
+ if not changed and crontab.n_existing != '':
+ if not (crontab.n_existing.endswith('\r') or crontab.n_existing.endswith('\n')):
+ changed = True
+
+ res_args = dict(
+ jobs=crontab.get_jobnames(),
+ envs=crontab.get_envnames(),
+ warnings=warnings,
+ changed=changed
+ )
+
+ if changed:
+ if not module.check_mode:
+ crontab.write()
+ if module._diff:
+ diff['after'] = crontab.render()
+ if crontab.cron_file:
+ diff['after_header'] = crontab.cron_file
+ else:
+ if crontab.user:
+ diff['after_header'] = 'crontab for user "%s"' % crontab.user
+ else:
+ diff['after_header'] = 'crontab'
+
+ res_args['diff'] = diff
+
+ # retain the backup only if crontab or cron file have changed
+ if backup and not module.check_mode:
+ if changed:
+ res_args['backup_file'] = backup_file
+ else:
+ os.unlink(backup_file)
+
+ if cron_file:
+ res_args['cron_file'] = cron_file
+
+ module.exit_json(**res_args)
+
+ # --- should never get here
+ module.exit_json(msg="Unable to execute cron task.")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/debconf.py b/lib/ansible/modules/debconf.py
new file mode 100644
index 0000000..32f0000
--- /dev/null
+++ b/lib/ansible/modules/debconf.py
@@ -0,0 +1,231 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Brian Coca <briancoca+ansible@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: debconf
+short_description: Configure a .deb package
+description:
+ - Configure a .deb package using debconf-set-selections.
+    - Alternatively, query existing selections.
+version_added: "1.6"
+extends_documentation_fragment:
+- action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ support: full
+ platforms: debian
+notes:
+ - This module requires the command line debconf tools.
+ - A number of questions have to be answered (depending on the package).
+ Use 'debconf-show <package>' on any Debian or derivative with the package
+ installed to see questions/settings available.
+ - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
+    - It is highly recommended to add I(no_log=True) to the task when handling sensitive information using this module.
+ - The debconf module does not reconfigure packages, it just updates the debconf database.
+ An additional step is needed (typically with I(notify) if debconf makes a change)
+ to reconfigure the package and apply the changes.
+ debconf is extensively used for pre-seeding configuration prior to installation
+ rather than modifying configurations.
+ So, while dpkg-reconfigure does use debconf data, it is not always authoritative
+ and you may need to check how your package is handled.
+ - Also note dpkg-reconfigure is a 3-phase process. It invokes the
+ control scripts from the C(/var/lib/dpkg/info) directory with the
+ C(<package>.prerm reconfigure <version>),
+ C(<package>.config reconfigure <version>) and C(<package>.postinst control <version>) arguments.
+ - The main issue is that the C(<package>.config reconfigure) step for many packages
+ will first reset the debconf database (overriding changes made by this module) by
+ checking the on-disk configuration. If this is the case for your package then
+ dpkg-reconfigure will effectively ignore changes made by debconf.
+ - However as dpkg-reconfigure only executes the C(<package>.config) step if the file
+ exists, it is possible to rename it to C(/var/lib/dpkg/info/<package>.config.ignore)
+ before executing C(dpkg-reconfigure -f noninteractive <package>) and then restore it.
+ This seems to be compliant with Debian policy for the .config file.
+requirements:
+- debconf
+- debconf-utils
+options:
+ name:
+ description:
+ - Name of package to configure.
+ type: str
+ required: true
+ aliases: [ pkg ]
+ question:
+ description:
+ - A debconf configuration setting.
+ type: str
+ aliases: [ selection, setting ]
+ vtype:
+ description:
+ - The type of the value supplied.
+    - It is highly recommended to add I(no_log=True) to the task when specifying I(vtype=password).
+ - C(seen) was added in Ansible 2.2.
+ type: str
+ choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ]
+ value:
+ description:
+ - Value to set the configuration to.
+ type: str
+ aliases: [ answer ]
+ unseen:
+ description:
+ - Do not set 'seen' flag when pre-seeding.
+ type: bool
+ default: false
+author:
+- Brian Coca (@bcoca)
+'''
+
+EXAMPLES = r'''
+- name: Set default locale to fr_FR.UTF-8
+ ansible.builtin.debconf:
+ name: locales
+ question: locales/default_environment_locale
+ value: fr_FR.UTF-8
+ vtype: select
+
+- name: Set to generate locales
+ ansible.builtin.debconf:
+ name: locales
+ question: locales/locales_to_be_generated
+ value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
+ vtype: multiselect
+
+- name: Accept oracle license
+ ansible.builtin.debconf:
+ name: oracle-java7-installer
+ question: shared/accepted-oracle-license-v1-1
+ value: 'true'
+ vtype: select
+
+- name: By specifying the package only, you can register/return the list of questions and current values
+ ansible.builtin.debconf:
+ name: tzdata
+
+- name: Pre-configure tripwire site passphrase
+ ansible.builtin.debconf:
+ name: tripwire
+ question: tripwire/site-passphrase
+ value: "{{ site_passphrase }}"
+ vtype: password
+ no_log: True
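+
+# A minimal sketch of the .config-rename workaround described in the notes
+# above; 'mypackage' is a hypothetical package name:
+- name: Hide the maintainer config script so dpkg-reconfigure keeps debconf values
+  ansible.builtin.command: mv /var/lib/dpkg/info/mypackage.config /var/lib/dpkg/info/mypackage.config.ignore
+
+- name: Reconfigure the package non-interactively
+  ansible.builtin.command: dpkg-reconfigure -f noninteractive mypackage
+
+- name: Restore the maintainer config script
+  ansible.builtin.command: mv /var/lib/dpkg/info/mypackage.config.ignore /var/lib/dpkg/info/mypackage.config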
+'''
+
+RETURN = r'''#'''
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_selections(module, pkg):
+    cmd = [module.get_bin_path('debconf-show', True), pkg]
+    # pass the argv list directly so the package name is not re-split by run_command
+    rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ selections = {}
+
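+    # debconf-show prints one "question: value" pair per line and prefixes
+    # already-seen questions with '*', e.g.:
+    #   * locales/default_environment_locale: fr_FR.UTF-8
+    # The marker and surrounding whitespace are stripped below.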
+ for line in out.splitlines():
+ (key, value) = line.split(':', 1)
+ selections[key.strip('*').strip()] = value.strip()
+
+ return selections
+
+
+def set_selection(module, pkg, question, vtype, value, unseen):
+ setsel = module.get_bin_path('debconf-set-selections', True)
+ cmd = [setsel]
+ if unseen:
+ cmd.append('-u')
+
+ if vtype == 'boolean':
+ if value == 'True':
+ value = 'true'
+ elif value == 'False':
+ value = 'false'
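+    # debconf-set-selections reads lines of the form:
+    #   <package> <question> <vtype> <value>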
+ data = ' '.join([pkg, question, vtype, value])
+
+ return module.run_command(cmd, data=data)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['pkg']),
+ question=dict(type='str', aliases=['selection', 'setting']),
+ vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']),
+ value=dict(type='str', aliases=['answer']),
+ unseen=dict(type='bool', default=False),
+ ),
+ required_together=(['question', 'vtype', 'value'],),
+ supports_check_mode=True,
+ )
+
+ # TODO: enable passing array of options and/or debconf file from get-selections dump
+ pkg = module.params["name"]
+ question = module.params["question"]
+ vtype = module.params["vtype"]
+ value = module.params["value"]
+ unseen = module.params["unseen"]
+
+ prev = get_selections(module, pkg)
+
+ changed = False
+ msg = ""
+
+ if question is not None:
+ if vtype is None or value is None:
+ module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
+
+ # if question doesn't exist, value cannot match
+ if question not in prev:
+ changed = True
+ else:
+
+ existing = prev[question]
+
+ # ensure we compare booleans supplied to the way debconf sees them (true/false strings)
+ if vtype == 'boolean':
+ value = to_text(value).lower()
+ existing = to_text(prev[question]).lower()
+
+ if value != existing:
+ changed = True
+
+ if changed:
+ if not module.check_mode:
+ rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
+ if rc:
+ module.fail_json(msg=e)
+
+ curr = {question: value}
+ if question in prev:
+ prev = {question: prev[question]}
+ else:
+ prev[question] = ''
+ if module._diff:
+ after = prev.copy()
+ after.update(curr)
+ diff_dict = {'before': prev, 'after': after}
+ else:
+ diff_dict = {}
+
+ module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
+
+ module.exit_json(changed=changed, msg=msg, current=prev)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/debug.py b/lib/ansible/modules/debug.py
new file mode 100644
index 0000000..b275a20
--- /dev/null
+++ b/lib/ansible/modules/debug.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: debug
+short_description: Print statements during execution
+description:
+- This module prints statements during execution and can be useful
+ for debugging variables or expressions without necessarily halting
+ the playbook.
+- Useful for debugging together with the 'when:' directive.
+- This module is also supported for Windows targets.
+version_added: '0.8'
+options:
+ msg:
+ description:
+ - The customized message that is printed. If omitted, prints a generic message.
+ type: str
+ default: 'Hello world!'
+ var:
+ description:
+ - A variable name to debug.
+ - Mutually exclusive with the C(msg) option.
+ - Be aware that this option already runs in Jinja2 context and has an implicit C({{ }}) wrapping,
+ so you should not be using Jinja2 delimiters unless you are looking for double interpolation.
+ type: str
+ verbosity:
+ description:
+    - A number that controls when the debug is run; if you set it to 3, debug will only run at verbosity -vvv or above.
+ type: int
+ default: 0
+ version_added: '2.1'
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.conn
+- action_common_attributes.flow
+
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ become:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ connection:
+ support: none
+ delegation:
+ details: Aside from C(register) and/or in combination with C(delegate_facts), it has little effect.
+ support: partial
+ platform:
+ support: full
+ platforms: all
+seealso:
+- module: ansible.builtin.assert
+- module: ansible.builtin.fail
+author:
+- Dag Wieers (@dagwieers)
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Print the gateway for each host when defined
+ ansible.builtin.debug:
+ msg: System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}
+ when: ansible_default_ipv4.gateway is defined
+
+- name: Get uptime information
+ ansible.builtin.shell: /usr/bin/uptime
+ register: result
+
+- name: Print return information from the previous task
+ ansible.builtin.debug:
+ var: result
+ verbosity: 2
+
+- name: Display all variables/facts known for a host
+ ansible.builtin.debug:
+ var: hostvars[inventory_hostname]
+ verbosity: 4
+
+- name: Prints two lines of messages, but only if there is an environment value set
+ ansible.builtin.debug:
+ msg:
+ - "Provisioning based on YOUR_KEY which is: {{ lookup('ansible.builtin.env', 'YOUR_KEY') }}"
+ - "These servers were built using the password of '{{ password_used }}'. Please retain this for later use."
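+
+# Because I(var) is implicitly templated, a bare expression is enough - a
+# sketch, assuming C(result) was registered by the uptime task above:
+- name: Print a single attribute of a registered variable, without Jinja2 delimiters
+  ansible.builtin.debug:
+    var: result.stdout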
+'''
diff --git a/lib/ansible/modules/dnf.py b/lib/ansible/modules/dnf.py
new file mode 100644
index 0000000..8131833
--- /dev/null
+++ b/lib/ansible/modules/dnf.py
@@ -0,0 +1,1468 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2015 Cristian van Ee <cristian at cvee.org>
+# Copyright 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com>
+# Copyright 2018 Adam Miller <admiller@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnf
+version_added: 1.9
+short_description: Manages packages with the I(dnf) package manager
+description:
+  - Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
+options:
+ name:
+ description:
+ - "A package name or package specifier with version, like C(name-1.0).
+ When using state=latest, this can be '*' which means run: dnf -y update.
+ You can also pass a url or a local path to a rpm file.
+ To operate on several packages this can accept a comma separated string of packages or a list of packages."
+      - Comparison operators for package version are valid here: C(>), C(<), C(>=), C(<=). Example - C(name >= 1.0).
+ Spaces around the operator are required.
+ - You can also pass an absolute path for a binary which is provided by the package to install.
+ See examples for more information.
+ required: true
+ aliases:
+ - pkg
+ type: list
+ elements: str
+
+ list:
+ description:
+ - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks.
+ Use M(ansible.builtin.package_facts) instead of the C(list) argument as a best practice.
+ type: str
+
+ state:
+ description:
+ - Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
+      - Default is C(None); in effect the default action is C(present) unless the C(autoremove) option is
+        enabled for this module, in which case C(absent) is inferred.
+ choices: ['absent', 'present', 'installed', 'removed', 'latest']
+ type: str
+
+ enablerepo:
+ description:
+ - I(Repoid) of repositories to enable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a ",".
+ type: list
+ elements: str
+
+ disablerepo:
+ description:
+ - I(Repoid) of repositories to disable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a ",".
+ type: list
+ elements: str
+
+ conf_file:
+ description:
+ - The remote dnf configuration file to use for the transaction.
+ type: str
+
+ disable_gpg_check:
+ description:
+ - Whether to disable the GPG checking of signatures of packages being
+ installed. Has an effect only if state is I(present) or I(latest).
+ - This setting affects packages installed from a repository as well as
+ "local" packages installed from the filesystem or a URL.
+ type: bool
+ default: 'no'
+
+ installroot:
+ description:
+ - Specifies an alternative installroot, relative to which all packages
+ will be installed.
+ version_added: "2.3"
+ default: "/"
+ type: str
+
+ releasever:
+ description:
+ - Specifies an alternative release from which all packages will be
+ installed.
+ version_added: "2.6"
+ type: str
+
+ autoremove:
+ description:
+ - If C(true), removes all "leaf" packages from the system that were originally
+ installed as dependencies of user-installed packages but which are no longer
+        required by any such package. Should be used alone or when state is I(absent).
+ type: bool
+ default: "no"
+ version_added: "2.4"
+ exclude:
+ description:
+      - Package name(s) to exclude when state=present or state=latest. This can be a
+        list or a comma separated string.
+ version_added: "2.7"
+ type: list
+ elements: str
+ skip_broken:
+ description:
+ - Skip all unavailable packages or packages with broken dependencies
+ without raising an error. Equivalent to passing the --skip-broken option.
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ update_cache:
+ description:
+ - Force dnf to check if cache is out of date and redownload if needed.
+ Has an effect only if state is I(present) or I(latest).
+ type: bool
+ default: "no"
+ aliases: [ expire-cache ]
+ version_added: "2.7"
+ update_only:
+ description:
+ - When using latest, only update installed packages. Do not install packages.
+      - Has an effect only if state is I(latest).
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ security:
+ description:
+ - If set to C(true), and C(state=latest) then only installs updates that have been marked security related.
+ - Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ bugfix:
+ description:
+ - If set to C(true), and C(state=latest) then only installs updates that have been marked bugfix related.
+ - Note that, similar to C(dnf upgrade-minimal), this filter applies to dependencies as well.
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ enable_plugin:
+ description:
+ - I(Plugin) name to enable for the install/update operation.
+ The enabled plugin will not persist beyond the transaction.
+ version_added: "2.7"
+ type: list
+ elements: str
+ disable_plugin:
+ description:
+ - I(Plugin) name to disable for the install/update operation.
+ The disabled plugins will not persist beyond the transaction.
+ version_added: "2.7"
+ type: list
+ elements: str
+ disable_excludes:
+ description:
+ - Disable the excludes defined in DNF config files.
+ - If set to C(all), disables all excludes.
+ - If set to C(main), disable excludes defined in [main] in dnf.conf.
+ - If set to C(repoid), disable excludes defined for given repo id.
+ version_added: "2.7"
+ type: str
+ validate_certs:
+ description:
+      - This only applies if using an HTTPS URL as the source of the rpm, for example for localinstall. If set to C(false), the SSL certificates will not be validated.
+      - This should only be set to C(false) on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
+ type: bool
+ default: "yes"
+ version_added: "2.7"
+ sslverify:
+ description:
+ - Disables SSL validation of the repository server for this transaction.
+ - This should be set to C(false) if one of the configured repositories is using an untrusted or self-signed certificate.
+ type: bool
+ default: "yes"
+ version_added: "2.13"
+ allow_downgrade:
+ description:
+      - Specify whether the named package and version is allowed to downgrade
+        an already installed higher version of that package.
+ Note that setting allow_downgrade=True can make this module
+ behave in a non-idempotent way. The task could end up with a set
+ of packages that does not match the complete list of specified
+ packages to install (because dependencies between the downgraded
+ package and others can cause changes to the packages which were
+ in the earlier transaction).
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ install_repoquery:
+ description:
+      - This is effectively a no-op in DNF, as it is not needed, but it is an accepted parameter for feature
+        parity/compatibility with the I(yum) module.
+ type: bool
+ default: "yes"
+ version_added: "2.7"
+ download_only:
+ description:
+ - Only download the packages, do not install them.
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ lock_timeout:
+ description:
+ - Amount of time to wait for the dnf lockfile to be freed.
+ required: false
+ default: 30
+ type: int
+ version_added: "2.8"
+ install_weak_deps:
+ description:
+ - Will also install all packages linked by a weak dependency relation.
+ type: bool
+ default: "yes"
+ version_added: "2.8"
+ download_dir:
+ description:
+ - Specifies an alternate directory to store packages.
+ - Has an effect only if I(download_only) is specified.
+ type: str
+ version_added: "2.8"
+ allowerasing:
+ description:
+ - If C(true) it allows erasing of installed packages to resolve dependencies.
+ required: false
+ type: bool
+ default: "no"
+ version_added: "2.10"
+ nobest:
+ description:
+ - Set best option to False, so that transactions are not limited to best candidates only.
+ required: false
+ type: bool
+ default: "no"
+ version_added: "2.11"
+ cacheonly:
+ description:
+ - Tells dnf to run entirely from system cache; does not download or update metadata.
+ type: bool
+ default: "no"
+ version_added: "2.12"
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.flow
+attributes:
+ action:
+ details: In the case of dnf, it has 2 action plugins that use it under the hood, M(ansible.builtin.yum) and M(ansible.builtin.package).
+ support: partial
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: rhel
+notes:
+  - When used with a C(loop:), each package will be processed individually; it is much more efficient to pass the list directly to the I(name) option.
+ - Group removal doesn't work if the group was installed with Ansible because
+ upstream dnf's API doesn't properly mark groups as installed, therefore upon
+ removal the module is unable to detect that the group is installed
+ (https://bugzilla.redhat.com/show_bug.cgi?id=1620324)
+requirements:
+ - "python >= 2.6"
+ - python-dnf
+  - "for the autoremove option you need dnf >= 2.0.1"
+author:
+ - Igor Gnatenko (@ignatenkobrain) <i.gnatenko.brain@gmail.com>
+ - Cristian van Ee (@DJMuggs) <cristian at cvee.org>
+ - Berend De Schouwer (@berenddeschouwer)
+ - Adam Miller (@maxamillion) <admiller@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: Install the latest version of Apache
+ ansible.builtin.dnf:
+ name: httpd
+ state: latest
+
+- name: Install Apache >= 2.4
+ ansible.builtin.dnf:
+ name: httpd >= 2.4
+ state: present
+
+- name: Install the latest version of Apache and MariaDB
+ ansible.builtin.dnf:
+ name:
+ - httpd
+ - mariadb-server
+ state: latest
+
+- name: Remove the Apache package
+ ansible.builtin.dnf:
+ name: httpd
+ state: absent
+
+- name: Install the latest version of Apache from the testing repo
+ ansible.builtin.dnf:
+ name: httpd
+ enablerepo: testing
+ state: present
+
+- name: Upgrade all packages
+ ansible.builtin.dnf:
+ name: "*"
+ state: latest
+
+- name: Update the webserver, depending on which is installed on the system. Do not install the other one
+ ansible.builtin.dnf:
+ name:
+ - httpd
+ - nginx
+ state: latest
+ update_only: yes
+
+- name: Install the nginx rpm from a remote repo
+ ansible.builtin.dnf:
+ name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
+ state: present
+
+- name: Install nginx rpm from a local file
+ ansible.builtin.dnf:
+ name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
+
+- name: Install Package based upon the file it provides
+ ansible.builtin.dnf:
+ name: /usr/bin/cowsay
+ state: present
+
+- name: Install the 'Development tools' package group
+ ansible.builtin.dnf:
+ name: '@Development tools'
+ state: present
+
+- name: Autoremove unneeded packages installed as dependencies
+ ansible.builtin.dnf:
+ autoremove: yes
+
+- name: Uninstall httpd but keep its dependencies
+ ansible.builtin.dnf:
+ name: httpd
+ state: absent
+ autoremove: no
+
+- name: Install a modularity appstream with defined stream and profile
+ ansible.builtin.dnf:
+ name: '@postgresql:9.6/client'
+ state: present
+
+- name: Install a modularity appstream with defined stream
+ ansible.builtin.dnf:
+ name: '@postgresql:9.6'
+ state: present
+
+- name: Install a modularity appstream with defined profile
+ ansible.builtin.dnf:
+ name: '@postgresql/client'
+ state: present
+'''
+
+import os
+import re
+import sys
+
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import fetch_file
+from ansible.module_utils.six import PY2, text_type
+from ansible.module_utils.compat.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
+from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
+
+
+# NOTE dnf Python bindings import is postponed, see DnfModule._ensure_dnf(),
+# because we need AnsibleModule object to use get_best_parsable_locale()
+# to set proper locale before importing dnf to be able to scrape
+# the output in some cases (FIXME?).
+dnf = None
+
+
+class DnfModule(YumDnf):
+ """
+ DNF Ansible module back-end implementation
+ """
+
+ def __init__(self, module):
+ # This populates instance vars for all argument spec params
+ super(DnfModule, self).__init__(module)
+
+ self._ensure_dnf()
+ self.lockfile = "/var/cache/dnf/*_lock.pid"
+ self.pkg_mgr_name = "dnf"
+
+ try:
+ self.with_modules = dnf.base.WITH_MODULES
+ except AttributeError:
+ self.with_modules = False
+
+ # DNF specific args that are not part of YumDnf
+ self.allowerasing = self.module.params['allowerasing']
+ self.nobest = self.module.params['nobest']
+
+ def is_lockfile_pid_valid(self):
+ # FIXME? it looks like DNF takes care of invalid lock files itself?
+ # https://github.com/ansible/ansible/issues/57189
+ return True
+
+ def _sanitize_dnf_error_msg_install(self, spec, error):
+ """
+ For unhandled dnf.exceptions.Error scenarios, there are certain error
+ messages we want to filter in an install scenario. Do that here.
+ """
+ if (
+ to_text("no package matched") in to_text(error) or
+ to_text("No match for argument:") in to_text(error)
+ ):
+ return "No package {0} available.".format(spec)
+
+ return error
+
+ def _sanitize_dnf_error_msg_remove(self, spec, error):
+ """
+ For unhandled dnf.exceptions.Error scenarios, there are certain error
+ messages we want to ignore in a removal scenario as known benign
+ failures. Do that here.
+ """
+ if (
+ 'no package matched' in to_native(error) or
+ 'No match for argument:' in to_native(error)
+ ):
+ return (False, "{0} is not installed".format(spec))
+
+ # Return value is tuple of:
+ # ("Is this actually a failure?", "Error Message")
+ return (True, error)
+
+ def _package_dict(self, package):
+ """Return a dictionary of information for the package."""
+ # NOTE: This no longer contains the 'dnfstate' field because it is
+ # already known based on the query type.
+ result = {
+ 'name': package.name,
+ 'arch': package.arch,
+ 'epoch': str(package.epoch),
+ 'release': package.release,
+ 'version': package.version,
+ 'repo': package.repoid}
+
+ # envra format for alignment with the yum module
+ result['envra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(**result)
+
+ # keep nevra key for backwards compat as it was previously
+ # defined with a value in envra format
+ result['nevra'] = result['envra']
+
+ if package.installtime == 0:
+ result['yumstate'] = 'available'
+ else:
+ result['yumstate'] = 'installed'
+
+ return result
+
+ def _split_package_arch(self, packagename):
+ # This list was auto generated on a Fedora 28 system with the following one-liner
+ # printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n'
+ redhat_rpm_arches = [
+ "aarch64", "alphaev56", "alphaev5", "alphaev67", "alphaev6", "alpha",
+ "alphapca56", "amd64", "armv3l", "armv4b", "armv4l", "armv5tejl", "armv5tel",
+ "armv5tl", "armv6hl", "armv6l", "armv7hl", "armv7hnl", "armv7l", "athlon",
+ "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "m68k", "mips64el",
+ "mips64", "mips64r6el", "mips64r6", "mipsel", "mips", "mipsr6el", "mipsr6",
+ "noarch", "pentium3", "pentium4", "ppc32dy4", "ppc64iseries", "ppc64le", "ppc64",
+ "ppc64p7", "ppc64pseries", "ppc8260", "ppc8560", "ppciseries", "ppc", "ppcpseries",
+ "riscv64", "s390", "s390x", "sh3", "sh4a", "sh4", "sh", "sparc64", "sparc64v",
+ "sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64"
+ ]
+
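+        # e.g. 'httpd.x86_64' -> ('httpd', 'x86_64'); 'httpd-2.4' -> ('httpd-2.4', None)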
+ name, delimiter, arch = packagename.rpartition('.')
+ if name and arch and arch in redhat_rpm_arches:
+ return name, arch
+ return packagename, None
+
+ def _packagename_dict(self, packagename):
+ """
+ Return a dictionary of information for a package name string or None
+ if the package name doesn't contain at least all NVR elements
+ """
+
+ if packagename[-4:] == '.rpm':
+ packagename = packagename[:-4]
+
+ rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.+]*)')
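+        # e.g. 'bash-0:4.4.19-7.el8' yields name='bash', epoch='0',
+        # version='4.4.19', release='7.el8'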
+ try:
+ arch = None
+ nevr, arch = self._split_package_arch(packagename)
+ if arch:
+ packagename = nevr
+            rpm_nevr_match = rpm_nevr_re.match(packagename)
+            if rpm_nevr_match:
+                name, epoch, version, release = rpm_nevr_match.groups()
+ if not version or not version.split('.')[0].isdigit():
+ return None
+ else:
+ return None
+ except AttributeError as e:
+ self.module.fail_json(
+ msg='Error attempting to parse package: %s, %s' % (packagename, to_native(e)),
+ rc=1,
+ results=[]
+ )
+
+ if not epoch:
+ epoch = "0"
+
+ if ':' in name:
+ epoch_name = name.split(":")
+
+ epoch = epoch_name[0]
+ name = ''.join(epoch_name[1:])
+
+ result = {
+ 'name': name,
+ 'epoch': epoch,
+ 'release': release,
+ 'version': version,
+ }
+
+ return result
+
+ # Original implementation from yum.rpmUtils.miscutils (GPLv2+)
+ # http://yum.baseurl.org/gitweb?p=yum.git;a=blob;f=rpmUtils/miscutils.py
+ def _compare_evr(self, e1, v1, r1, e2, v2, r2):
+ # return 1: a is newer than b
+ # 0: a and b are the same version
+ # -1: b is newer than a
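+        # e.g. _compare_evr('0', '1.0', '1', '0', '1.1', '1') returns -1
+        # (the second EVR is newer)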
+ if e1 is None:
+ e1 = '0'
+ else:
+ e1 = str(e1)
+ v1 = str(v1)
+ r1 = str(r1)
+ if e2 is None:
+ e2 = '0'
+ else:
+ e2 = str(e2)
+ v2 = str(v2)
+ r2 = str(r2)
+ rc = dnf.rpm.rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
+ return rc
+
+ def _ensure_dnf(self):
+ locale = get_best_parsable_locale(self.module)
+ os.environ['LC_ALL'] = os.environ['LC_MESSAGES'] = locale
+ os.environ['LANGUAGE'] = os.environ['LANG'] = locale
+
+ global dnf
+ try:
+ import dnf
+ import dnf.cli
+ import dnf.const
+ import dnf.exceptions
+ import dnf.subject
+ import dnf.util
+ HAS_DNF = True
+ except ImportError:
+ HAS_DNF = False
+
+ if HAS_DNF:
+ return
+
+ system_interpreters = ['/usr/libexec/platform-python',
+ '/usr/bin/python3',
+ '/usr/bin/python2',
+ '/usr/bin/python']
+
+ if not has_respawned():
+ # probe well-known system Python locations for accessible bindings, favoring py3
+ interpreter = probe_interpreters_for_module(system_interpreters, 'dnf')
+
+ if interpreter:
+ # respawn under the interpreter where the bindings should be found
+ respawn_module(interpreter)
+ # end of the line for this module, the process will exit here once the respawned module completes
+
+ # done all we can do, something is just broken (auto-install isn't useful anymore with respawn, so it was removed)
+ self.module.fail_json(
+ msg="Could not import the dnf python module using {0} ({1}). "
+ "Please install `python3-dnf` or `python2-dnf` package or ensure you have specified the "
+ "correct ansible_python_interpreter. (attempted {2})"
+ .format(sys.executable, sys.version.replace('\n', ''), system_interpreters),
+ results=[]
+ )
+
+ def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/', sslverify=True):
+ """Configure the dnf Base object."""
+
+ conf = base.conf
+
+ # Change the configuration file path if provided, this must be done before conf.read() is called
+ if conf_file:
+ # Fail if we can't read the configuration file.
+ if not os.access(conf_file, os.R_OK):
+ self.module.fail_json(
+ msg="cannot read configuration file", conf_file=conf_file,
+ results=[],
+ )
+ else:
+ conf.config_file_path = conf_file
+
+ # Read the configuration file
+ conf.read()
+
+ # Turn off debug messages in the output
+ conf.debuglevel = 0
+
+ # Set whether to check gpg signatures
+ conf.gpgcheck = not disable_gpg_check
+ conf.localpkg_gpgcheck = not disable_gpg_check
+
+ # Don't prompt for user confirmations
+ conf.assumeyes = True
+
+ # Set certificate validation
+ conf.sslverify = sslverify
+
+ # Set installroot
+ conf.installroot = installroot
+
+ # Load substitutions from the filesystem
+ conf.substitutions.update_from_etc(installroot)
+
+ # Handle different DNF versions immutable mutable datatypes and
+ # dnf v1/v2/v3
+ #
+ # In DNF < 3.0 are lists, and modifying them works
+ # In DNF >= 3.0 < 3.6 are lists, but modifying them doesn't work
+ # In DNF >= 3.6 have been turned into tuples, to communicate that modifying them doesn't work
+ #
+ # https://www.happyassassin.net/2018/06/27/adams-debugging-adventures-the-immutable-mutable-object/
+ #
+ # Set excludes
+ if self.exclude:
+ _excludes = list(conf.exclude)
+ _excludes.extend(self.exclude)
+ conf.exclude = _excludes
+ # Set disable_excludes
+ if self.disable_excludes:
+ _disable_excludes = list(conf.disable_excludes)
+ if self.disable_excludes not in _disable_excludes:
+ _disable_excludes.append(self.disable_excludes)
+ conf.disable_excludes = _disable_excludes
+
+ # Set releasever
+ if self.releasever is not None:
+ conf.substitutions['releasever'] = self.releasever
+
+ if conf.substitutions.get('releasever') is None:
+ self.module.warn(
+ 'Unable to detect release version (use "releasever" option to specify release version)'
+ )
+ # values of conf.substitutions are expected to be strings
+ # setting this to an empty string instead of None appears to mimic the DNF CLI behavior
+ conf.substitutions['releasever'] = ''
+
+ # Set skip_broken (in dnf this is strict=0)
+ if self.skip_broken:
+ conf.strict = 0
+
+ # Set best
+ if self.nobest:
+ conf.best = 0
+
+ if self.download_only:
+ conf.downloadonly = True
+ if self.download_dir:
+ conf.destdir = self.download_dir
+
+ if self.cacheonly:
+ conf.cacheonly = True
+
+ # Default in dnf upstream is true
+ conf.clean_requirements_on_remove = self.autoremove
+
+ # Default in dnf (and module default) is True
+ conf.install_weak_deps = self.install_weak_deps
+
+ def _specify_repositories(self, base, disablerepo, enablerepo):
+ """Enable and disable repositories matching the provided patterns."""
+ base.read_all_repos()
+ repos = base.repos
+
+ # Disable repositories
+ for repo_pattern in disablerepo:
+ if repo_pattern:
+ for repo in repos.get_matching(repo_pattern):
+ repo.disable()
+
+ # Enable repositories
+ for repo_pattern in enablerepo:
+ if repo_pattern:
+ for repo in repos.get_matching(repo_pattern):
+ repo.enable()
+
+ def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot, sslverify):
+ """Return a fully configured dnf Base object."""
+ base = dnf.Base()
+ self._configure_base(base, conf_file, disable_gpg_check, installroot, sslverify)
+ try:
+ # this method has been supported in dnf-4.2.17-6 or later
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1788212
+ base.setup_loggers()
+ except AttributeError:
+ pass
+ try:
+ base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
+ base.pre_configure_plugins()
+ except AttributeError:
+ pass # older versions of dnf didn't require this and don't have these methods
+ self._specify_repositories(base, disablerepo, enablerepo)
+ try:
+ base.configure_plugins()
+ except AttributeError:
+ pass # older versions of dnf didn't require this and don't have these methods
+
+ try:
+ if self.update_cache:
+ try:
+ base.update_cache()
+ except dnf.exceptions.RepoError as e:
+ self.module.fail_json(
+ msg="{0}".format(to_text(e)),
+ results=[],
+ rc=1
+ )
+ base.fill_sack(load_system_repo='auto')
+ except dnf.exceptions.RepoError as e:
+ self.module.fail_json(
+ msg="{0}".format(to_text(e)),
+ results=[],
+ rc=1
+ )
+
+ add_security_filters = getattr(base, "add_security_filters", None)
+ if callable(add_security_filters):
+ filters = {}
+ if self.bugfix:
+ filters.setdefault('types', []).append('bugfix')
+ if self.security:
+ filters.setdefault('types', []).append('security')
+ if filters:
+ add_security_filters('eq', **filters)
+ else:
+ filters = []
+ if self.bugfix:
+ key = {'advisory_type__eq': 'bugfix'}
+ filters.append(base.sack.query().upgrades().filter(**key))
+ if self.security:
+ key = {'advisory_type__eq': 'security'}
+ filters.append(base.sack.query().upgrades().filter(**key))
+ if filters:
+ base._update_security_filters = filters
+
+ return base
+
+ def list_items(self, command):
+ """List package info based on the command."""
+ # Rename updates to upgrades
+ if command == 'updates':
+ command = 'upgrades'
+
+ # Return the corresponding packages
+ if command in ['installed', 'upgrades', 'available']:
+ results = [
+ self._package_dict(package)
+ for package in getattr(self.base.sack.query(), command)()]
+ # Return the enabled repository ids
+ elif command in ['repos', 'repositories']:
+ results = [
+ {'repoid': repo.id, 'state': 'enabled'}
+ for repo in self.base.repos.iter_enabled()]
+ # Return any matching packages
+ else:
+ packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
+ results = [self._package_dict(package) for package in packages]
+
+ self.module.exit_json(msg="", results=results)
+
+ def _is_installed(self, pkg):
+ installed = self.base.sack.query().installed()
+
+ package_spec = {}
+ name, arch = self._split_package_arch(pkg)
+ if arch:
+ package_spec['arch'] = arch
+
+ package_details = self._packagename_dict(pkg)
+ if package_details:
+ package_details['epoch'] = int(package_details['epoch'])
+ package_spec.update(package_details)
+ else:
+ package_spec['name'] = name
+
+ return bool(installed.filter(**package_spec))
+
+ def _is_newer_version_installed(self, pkg_name):
+ candidate_pkg = self._packagename_dict(pkg_name)
+ if not candidate_pkg:
+ # The user didn't provide a versioned rpm, so version checking is
+ # not required
+ return False
+
+ installed = self.base.sack.query().installed()
+ installed_pkg = installed.filter(name=candidate_pkg['name']).run()
+ if installed_pkg:
+ installed_pkg = installed_pkg[0]
+
+ # this looks weird but one is a dict and the other is a dnf.Package
+ evr_cmp = self._compare_evr(
+ installed_pkg.epoch, installed_pkg.version, installed_pkg.release,
+ candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'],
+ )
+
+ return evr_cmp == 1
+ else:
+ return False
+
+ def _mark_package_install(self, pkg_spec, upgrade=False):
+ """Mark the package for install."""
+ is_newer_version_installed = self._is_newer_version_installed(pkg_spec)
+ is_installed = self._is_installed(pkg_spec)
+ try:
+ if is_newer_version_installed:
+ if self.allow_downgrade:
+ # dnf only does allow_downgrade, we have to handle this ourselves
+ # because it allows a possibility for non-idempotent transactions
+                    # on a system's package set (provided the yum repo has many old
+ # NVRs indexed)
+ if upgrade:
+ if is_installed: # Case 1
+ # TODO: Is this case reachable?
+ #
+ # _is_installed() demands a name (*not* NVR) or else is always False
+ # (wildcards are treated literally).
+ #
+ # Meanwhile, _is_newer_version_installed() demands something versioned
+ # or else is always false.
+ #
+ # I fail to see how they can both be true at the same time for any
+ # given pkg_spec. -re
+ self.base.upgrade(pkg_spec)
+ else: # Case 2
+ self.base.install(pkg_spec, strict=self.base.conf.strict)
+ else: # Case 3
+ self.base.install(pkg_spec, strict=self.base.conf.strict)
+ else: # Case 4, Nothing to do, report back
+ pass
+ elif is_installed: # A potentially older (or same) version is installed
+ if upgrade: # Case 5
+ self.base.upgrade(pkg_spec)
+ else: # Case 6, Nothing to do, report back
+ pass
+ else: # Case 7, The package is not installed, simply install it
+ self.base.install(pkg_spec, strict=self.base.conf.strict)
+
+ return {'failed': False, 'msg': '', 'failure': '', 'rc': 0}
+
+ except dnf.exceptions.MarkingError as e:
+ return {
+ 'failed': True,
+ 'msg': "No package {0} available.".format(pkg_spec),
+ 'failure': " ".join((pkg_spec, to_native(e))),
+ 'rc': 1,
+ "results": []
+ }
+
+ except dnf.exceptions.DepsolveError as e:
+ return {
+ 'failed': True,
+ 'msg': "Depsolve Error occurred for package {0}.".format(pkg_spec),
+ 'failure': " ".join((pkg_spec, to_native(e))),
+ 'rc': 1,
+ "results": []
+ }
+
+ except dnf.exceptions.Error as e:
+ if to_text("already installed") in to_text(e):
+ return {'failed': False, 'msg': '', 'failure': ''}
+ else:
+ return {
+ 'failed': True,
+ 'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
+ 'failure': " ".join((pkg_spec, to_native(e))),
+ 'rc': 1,
+ "results": []
+ }
+
+ def _whatprovides(self, filepath):
+ self.base.read_all_repos()
+ available = self.base.sack.query().available()
+ # Search in file
+ files_filter = available.filter(file=filepath)
+ # And Search in provides
+ pkg_spec = files_filter.union(available.filter(provides=filepath)).run()
+
+ if pkg_spec:
+ return pkg_spec[0].name
+
+ def _parse_spec_group_file(self):
+ pkg_specs, grp_specs, module_specs, filenames = [], [], [], []
+ already_loaded_comps = False # Only load this if necessary, it's slow
+
+ for name in self.names:
+ if '://' in name:
+ name = fetch_file(self.module, name)
+ filenames.append(name)
+ elif name.endswith(".rpm"):
+ filenames.append(name)
+ elif name.startswith('/'):
+ # like "dnf install /usr/bin/vi"
+ pkg_spec = self._whatprovides(name)
+ if pkg_spec:
+ pkg_specs.append(pkg_spec)
+ continue
+ elif name.startswith("@") or ('/' in name):
+ if not already_loaded_comps:
+ self.base.read_comps()
+ already_loaded_comps = True
+
+ grp_env_mdl_candidate = name[1:].strip()
+
+ if self.with_modules:
+ mdl = self.module_base._get_modules(grp_env_mdl_candidate)
+ if mdl[0]:
+ module_specs.append(grp_env_mdl_candidate)
+ else:
+ grp_specs.append(grp_env_mdl_candidate)
+ else:
+ grp_specs.append(grp_env_mdl_candidate)
+ else:
+ pkg_specs.append(name)
+ return pkg_specs, grp_specs, module_specs, filenames
+
+ def _update_only(self, pkgs):
+ not_installed = []
+ for pkg in pkgs:
+ if self._is_installed(pkg):
+ try:
+ if isinstance(to_text(pkg), text_type):
+ self.base.upgrade(pkg)
+ else:
+ self.base.package_upgrade(pkg)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Error occurred attempting update_only operation: {0}".format(to_native(e)),
+ results=[],
+ rc=1,
+ )
+ else:
+ not_installed.append(pkg)
+
+ return not_installed
+
+ def _install_remote_rpms(self, filenames):
+ if int(dnf.__version__.split(".")[0]) >= 2:
+ pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
+ else:
+ pkgs = []
+ try:
+ for filename in filenames:
+ pkgs.append(self.base.add_remote_rpm(filename))
+ except IOError as e:
+ if to_text("Can not load RPM file") in to_text(e):
+ self.module.fail_json(
+ msg="Error occurred attempting remote rpm install of package: {0}. {1}".format(filename, to_native(e)),
+ results=[],
+ rc=1,
+ )
+ if self.update_only:
+ self._update_only(pkgs)
+ else:
+ for pkg in pkgs:
+ try:
+ if self._is_newer_version_installed(self._package_dict(pkg)['nevra']):
+ if self.allow_downgrade:
+ self.base.package_install(pkg, strict=self.base.conf.strict)
+ else:
+ self.base.package_install(pkg, strict=self.base.conf.strict)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
+ results=[],
+ rc=1,
+ )
+
+ def _is_module_installed(self, module_spec):
+ if self.with_modules:
+ module_spec = module_spec.strip()
+ module_list, nsv = self.module_base._get_modules(module_spec)
+ enabled_streams = self.base._moduleContainer.getEnabledStream(nsv.name)
+
+ if enabled_streams:
+ if nsv.stream:
+ if nsv.stream in enabled_streams:
+ return True # The provided stream was found
+ else:
+ return False # The provided stream was not found
+ else:
+ return True # No stream provided, but module found
+
+ return False # seems like a sane default
+
+ def ensure(self):
+
+ response = {
+ 'msg': "",
+ 'changed': False,
+ 'results': [],
+ 'rc': 0
+ }
+
+ # Accumulate failures. Package management modules install what they can
+ # and fail with a message about what they can't.
+ failure_response = {
+ 'msg': "",
+ 'failures': [],
+ 'results': [],
+ 'rc': 1
+ }
+
+ # Autoremove is called alone
+ # Jump to remove path where base.autoremove() is run
+ if not self.names and self.autoremove:
+ self.names = []
+ self.state = 'absent'
+
+ if self.names == ['*'] and self.state == 'latest':
+ try:
+ self.base.upgrade_all()
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occurred attempting to upgrade all packages"
+ self.module.fail_json(**failure_response)
+ else:
+ pkg_specs, group_specs, module_specs, filenames = self._parse_spec_group_file()
+
+ pkg_specs = [p.strip() for p in pkg_specs]
+ filenames = [f.strip() for f in filenames]
+ groups = []
+ environments = []
+ for group_spec in (g.strip() for g in group_specs):
+ group = self.base.comps.group_by_pattern(group_spec)
+ if group:
+ groups.append(group.id)
+ else:
+ environment = self.base.comps.environment_by_pattern(group_spec)
+ if environment:
+ environments.append(environment.id)
+ else:
+ self.module.fail_json(
+ msg="No group {0} available.".format(group_spec),
+ results=[],
+ )
+
+ if self.state in ['installed', 'present']:
+ # Install files.
+ self._install_remote_rpms(filenames)
+ for filename in filenames:
+ response['results'].append("Installed {0}".format(filename))
+
+ # Install modules
+ if module_specs and self.with_modules:
+ for module in module_specs:
+ try:
+ if not self._is_module_installed(module):
+ response['results'].append("Module {0} installed.".format(module))
+ self.module_base.install([module])
+ self.module_base.enable([module])
+ except dnf.exceptions.MarkingErrors as e:
+ failure_response['failures'].append(' '.join((module, to_native(e))))
+
+ # Install groups.
+ for group in groups:
+ try:
+ group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
+ if group_pkg_count_installed == 0:
+ response['results'].append("Group {0} already installed.".format(group))
+ else:
+ response['results'].append("Group {0} installed.".format(group))
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occurred attempting to install group: {0}".format(group)
+ self.module.fail_json(**failure_response)
+ except dnf.exceptions.Error as e:
+ # In dnf 2.0 if all the mandatory packages in a group do
+ # not install, an error is raised. We want to capture
+ # this but still install as much as possible.
+ failure_response['failures'].append(" ".join((group, to_native(e))))
+
+ for environment in environments:
+ try:
+ self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
+ self.module.fail_json(**failure_response)
+ except dnf.exceptions.Error as e:
+ failure_response['failures'].append(" ".join((environment, to_native(e))))
+
+ if module_specs and not self.with_modules:
+ # This means that the group or env wasn't found in comps
+ self.module.fail_json(
+ msg="No group {0} available.".format(module_specs[0]),
+ results=[],
+ )
+
+ # Install packages.
+ if self.update_only:
+ not_installed = self._update_only(pkg_specs)
+ for spec in not_installed:
+ response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
+ else:
+ for pkg_spec in pkg_specs:
+ install_result = self._mark_package_install(pkg_spec)
+ if install_result['failed']:
+ if install_result['msg']:
+ failure_response['msg'] += install_result['msg']
+ failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
+ else:
+ if install_result['msg']:
+ response['results'].append(install_result['msg'])
+
+ elif self.state == 'latest':
+ # "latest" is same as "installed" for filenames.
+ self._install_remote_rpms(filenames)
+ for filename in filenames:
+ response['results'].append("Installed {0}".format(filename))
+
+ # Upgrade modules
+ if module_specs and self.with_modules:
+ for module in module_specs:
+ try:
+ if self._is_module_installed(module):
+ response['results'].append("Module {0} upgraded.".format(module))
+ self.module_base.upgrade([module])
+ except dnf.exceptions.MarkingErrors as e:
+ failure_response['failures'].append(' '.join((module, to_native(e))))
+
+ for group in groups:
+ try:
+ try:
+ self.base.group_upgrade(group)
+ response['results'].append("Group {0} upgraded.".format(group))
+ except dnf.exceptions.CompsError:
+ if not self.update_only:
+ # If not already installed, try to install.
+ group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
+ if group_pkg_count_installed == 0:
+ response['results'].append("Group {0} already installed.".format(group))
+ else:
+ response['results'].append("Group {0} installed.".format(group))
+ except dnf.exceptions.Error as e:
+ failure_response['failures'].append(" ".join((group, to_native(e))))
+
+ for environment in environments:
+ try:
+ try:
+ self.base.environment_upgrade(environment)
+ except dnf.exceptions.CompsError:
+ # If not already installed, try to install.
+ self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
+ except dnf.exceptions.Error as e:
+ failure_response['failures'].append(" ".join((environment, to_native(e))))
+
+ if self.update_only:
+ not_installed = self._update_only(pkg_specs)
+ for spec in not_installed:
+ response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
+ else:
+ for pkg_spec in pkg_specs:
+ # Previously we forced base.conf.best=True here.
+ # However in 2.11+ there is a self.nobest option, so defer to that.
+ # Note, however, that just because nobest isn't set, doesn't mean that
+ # base.conf.best is actually true. We only force it false in
+ # _configure_base(), we never set it to true, and it can default to false.
+ # Thus, we still need to explicitly set it here.
+ self.base.conf.best = not self.nobest
+ install_result = self._mark_package_install(pkg_spec, upgrade=True)
+ if install_result['failed']:
+ if install_result['msg']:
+ failure_response['msg'] += install_result['msg']
+ failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
+ else:
+ if install_result['msg']:
+ response['results'].append(install_result['msg'])
+
+ else:
+ # state == absent
+ if filenames:
+ self.module.fail_json(
+ msg="Cannot remove paths -- please specify package name.",
+ results=[],
+ )
+
+ # Remove modules
+ if module_specs and self.with_modules:
+ for module in module_specs:
+ try:
+ if self._is_module_installed(module):
+ response['results'].append("Module {0} removed.".format(module))
+ self.module_base.remove([module])
+ self.module_base.disable([module])
+ self.module_base.reset([module])
+ except dnf.exceptions.MarkingErrors as e:
+ failure_response['failures'].append(' '.join((module, to_native(e))))
+
+ for group in groups:
+ try:
+ self.base.group_remove(group)
+ except dnf.exceptions.CompsError:
+ # Group is already uninstalled.
+ pass
+ except AttributeError:
+ # Group either isn't installed or wasn't marked installed at install time
+ # because of DNF bug
+ #
+ # This is necessary until the upstream dnf API bug is fixed where installing
+ # a group via the dnf API doesn't actually mark the group as installed
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1620324
+ pass
+
+ for environment in environments:
+ try:
+ self.base.environment_remove(environment)
+ except dnf.exceptions.CompsError:
+ # Environment is already uninstalled.
+ pass
+
+ installed = self.base.sack.query().installed()
+ for pkg_spec in pkg_specs:
+ # short-circuit installed check for wildcard matching
+ if '*' in pkg_spec:
+ try:
+ self.base.remove(pkg_spec)
+ except dnf.exceptions.MarkingError as e:
+ is_failure, handled_remove_error = self._sanitize_dnf_error_msg_remove(pkg_spec, to_native(e))
+ if is_failure:
+ failure_response['failures'].append('{0} - {1}'.format(pkg_spec, to_native(e)))
+ else:
+ response['results'].append(handled_remove_error)
+ continue
+
+ installed_pkg = dnf.subject.Subject(pkg_spec).get_best_query(
+ sack=self.base.sack).installed().run()
+
+ for pkg in installed_pkg:
+ self.base.remove(str(pkg))
+
+ # Like the dnf CLI we want to allow recursive removal of dependent
+ # packages
+ self.allowerasing = True
+
+ if self.autoremove:
+ self.base.autoremove()
+
+ try:
+ # NOTE for people who go down the rabbit hole of figuring out why
+ # resolve() throws DepsolveError here on dep conflict, but not when
+ # called from the CLI: It's controlled by conf.best. When best is
+ # set, Hawkey will fail the goal, and resolve() in dnf.base.Base
+ # will throw. Otherwise if it's not set, the update (install) will
+ # be (almost silently) removed from the goal, and Hawkey will report
+ # success. Note that in this case, similar to the CLI, skip_broken
+ # does nothing to help here, so we don't take it into account at
+ # all.
+ if not self.base.resolve(allow_erasing=self.allowerasing):
+ if failure_response['failures']:
+ failure_response['msg'] = 'Failed to install some of the specified packages'
+ self.module.fail_json(**failure_response)
+
+ response['msg'] = "Nothing to do"
+ self.module.exit_json(**response)
+ else:
+ response['changed'] = True
+
+ # If packages got installed/removed, add them to the results.
+ # We do this early so we can use it for both check_mode and not.
+ if self.download_only:
+ install_action = 'Downloaded'
+ else:
+ install_action = 'Installed'
+ for package in self.base.transaction.install_set:
+ response['results'].append("{0}: {1}".format(install_action, package))
+ for package in self.base.transaction.remove_set:
+ response['results'].append("Removed: {0}".format(package))
+
+ if failure_response['failures']:
+ failure_response['msg'] = 'Failed to install some of the specified packages'
+ self.module.fail_json(**failure_response)
+ if self.module.check_mode:
+ response['msg'] = "Check mode: No changes made, but would have if not in check mode"
+ self.module.exit_json(**response)
+
+ try:
+ if self.download_only and self.download_dir and self.base.conf.destdir:
+ dnf.util.ensure_dir(self.base.conf.destdir)
+ self.base.repos.all().pkgdir = self.base.conf.destdir
+
+ self.base.download_packages(self.base.transaction.install_set)
+ except dnf.exceptions.DownloadError as e:
+ self.module.fail_json(
+ msg="Failed to download packages: {0}".format(to_text(e)),
+ results=[],
+ )
+
+ # Validate GPG. This is NOT done in dnf.Base (it's done in the
+ # upstream CLI subclass of dnf.Base)
+ if not self.disable_gpg_check:
+ for package in self.base.transaction.install_set:
+ fail = False
+ gpgres, gpgerr = self.base._sig_check_pkg(package)
+ if gpgres == 0: # validated successfully
+ continue
+ elif gpgres == 1: # validation failed, install cert?
+ try:
+ self.base._get_key_for_package(package)
+ except dnf.exceptions.Error as e:
+ fail = True
+ else: # fatal error
+ fail = True
+
+ if fail:
+ msg = 'Failed to validate GPG signature for {0}: {1}'.format(package, gpgerr)
+ self.module.fail_json(msg)
+
+ if self.download_only:
+ # No further work left to do, and the results were already updated above.
+ # Just return them.
+ self.module.exit_json(**response)
+ else:
+ tid = self.base.do_transaction()
+ if tid is not None:
+ transaction = self.base.history.old([tid])[0]
+ if transaction.return_code:
+ failure_response['failures'].append(transaction.output())
+
+ if failure_response['failures']:
+ failure_response['msg'] = 'Failed to install some of the specified packages'
+ self.module.fail_json(**failure_response)
+ self.module.exit_json(**response)
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occurred: {0}".format(to_native(e))
+ self.module.fail_json(**failure_response)
+ except dnf.exceptions.Error as e:
+ if to_text("already installed") in to_text(e):
+ response['changed'] = False
+ response['results'].append("Package already installed: {0}".format(to_native(e)))
+ self.module.exit_json(**response)
+ else:
+ failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
+ self.module.fail_json(**failure_response)
+
+ def run(self):
+ """The main function."""
+
+ # Check if autoremove is called correctly
+ if self.autoremove:
+ if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'):
+ self.module.fail_json(
+ msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__,
+ results=[],
+ )
+
+ # Check if download_dir is called correctly
+ if self.download_dir:
+ if LooseVersion(dnf.__version__) < LooseVersion('2.6.2'):
+ self.module.fail_json(
+ msg="download_dir requires dnf>=2.6.2. Current dnf version is %s" % dnf.__version__,
+ results=[],
+ )
+
+ if self.update_cache and not self.names and not self.list:
+ self.base = self._base(
+ self.conf_file, self.disable_gpg_check, self.disablerepo,
+ self.enablerepo, self.installroot, self.sslverify
+ )
+ self.module.exit_json(
+ msg="Cache updated",
+ changed=False,
+ results=[],
+ rc=0
+ )
+
+ # Set state as installed by default
+ # This is not set in AnsibleModule() because the following shouldn't happen
+ # - dnf: autoremove=yes state=installed
+ if self.state is None:
+ self.state = 'installed'
+
+ if self.list:
+ self.base = self._base(
+ self.conf_file, self.disable_gpg_check, self.disablerepo,
+ self.enablerepo, self.installroot, self.sslverify
+ )
+ self.list_items(self.list)
+ else:
+            # Note: constructing the base object takes a long time, so we want
+            # to check for failures before running it.
+ if not self.download_only and not dnf.util.am_i_root():
+ self.module.fail_json(
+ msg="This command has to be run under the root user.",
+ results=[],
+ )
+ self.base = self._base(
+ self.conf_file, self.disable_gpg_check, self.disablerepo,
+ self.enablerepo, self.installroot, self.sslverify
+ )
+
+ if self.with_modules:
+ self.module_base = dnf.module.module_base.ModuleBase(self.base)
+
+ self.ensure()
+
+
+def main():
+ # state=installed name=pkgspec
+ # state=removed name=pkgspec
+ # state=latest name=pkgspec
+ #
+ # informational commands:
+ # list=installed
+ # list=updates
+ # list=available
+ # list=repos
+ # list=pkgspec
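+    #
+    # For illustration only (a hypothetical task, not part of this module's code),
+    # a play might drive these states like:
+    #   - name: Ensure the httpd package is current
+    #     ansible.builtin.dnf:
+    #       name: httpd
+    #       state: latest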
+
+ # Extend yumdnf_argument_spec with dnf-specific features that will never be
+ # backported to yum because yum is now in "maintenance mode" upstream
+ yumdnf_argument_spec['argument_spec']['allowerasing'] = dict(default=False, type='bool')
+ yumdnf_argument_spec['argument_spec']['nobest'] = dict(default=False, type='bool')
+
+ module = AnsibleModule(
+ **yumdnf_argument_spec
+ )
+
+ module_implementation = DnfModule(module)
+ try:
+ module_implementation.run()
+ except dnf.exceptions.RepoError as de:
+ module.fail_json(
+ msg="Failed to synchronize repodata: {0}".format(to_native(de)),
+ rc=1,
+ results=[],
+ changed=False
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/dpkg_selections.py b/lib/ansible/modules/dpkg_selections.py
new file mode 100644
index 0000000..87cad52
--- /dev/null
+++ b/lib/ansible/modules/dpkg_selections.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dpkg_selections
+short_description: Dpkg package selections
+description:
+    - Change dpkg package selection state via C(--get-selections) and C(--set-selections).
+version_added: "2.0"
+author:
+- Brian Brazil (@brian-brazil) <brian.brazil@boxever.com>
+options:
+ name:
+ description:
+ - Name of the package.
+ required: true
+ type: str
+ selection:
+ description:
+ - The selection state to set the package to.
+ choices: [ 'install', 'hold', 'deinstall', 'purge' ]
+ required: true
+ type: str
+extends_documentation_fragment:
+- action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ support: full
+ platforms: debian
+notes:
+    - This module won't cause any packages to be installed/removed/purged; use the C(apt) module for that.
+'''
+EXAMPLES = '''
+- name: Prevent python from being upgraded
+ ansible.builtin.dpkg_selections:
+ name: python
+ selection: hold
+
+- name: Allow python to be upgraded
+ ansible.builtin.dpkg_selections:
+ name: python
+ selection: install
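+
+# Illustrative sketch: 'deinstall' only records the desired selection state;
+# actual removal still happens through apt/dpkg (see the note above).
+- name: Mark python for removal on the next dpkg/apt run
+  ansible.builtin.dpkg_selections:
+    name: python
+    selection: deinstall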
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ selection=dict(choices=['install', 'hold', 'deinstall', 'purge'], required=True)
+ ),
+ supports_check_mode=True,
+ )
+
+ dpkg = module.get_bin_path('dpkg', True)
+
+ name = module.params['name']
+ selection = module.params['selection']
+
+ # Get current settings.
+ rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True)
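+    # Sketch of the expected output (an assumption based on dpkg's documented
+    # format): "<name><tabs><selection>", e.g. "python    hold"; a package
+    # dpkg does not know about yields empty stdout, hence the fallback below.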
+ if not out:
+ current = 'not present'
+ else:
+ current = out.split()[1]
+
+ changed = current != selection
+
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, before=current, after=selection)
+
+ module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True)
+ module.exit_json(changed=changed, before=current, after=selection)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/expect.py b/lib/ansible/modules/expect.py
new file mode 100644
index 0000000..99ffe9f
--- /dev/null
+++ b/lib/ansible/modules/expect.py
@@ -0,0 +1,258 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: expect
+version_added: '2.0'
+short_description: Executes a command and responds to prompts
+description:
+ - The C(expect) module executes a command and responds to prompts.
+ - The given command will be executed on all selected nodes. It will not be
+ processed through the shell, so variables like C($HOME) and operations
+ like C("<"), C(">"), C("|"), and C("&") will not work.
+options:
+ command:
+ description:
+      - The command to run.
+ required: true
+ type: str
+ creates:
+ type: path
+ description:
+      - A filename; when it already exists, this step will B(not) be run.
+ removes:
+ type: path
+ description:
+      - A filename; when it does not exist, this step will B(not) be run.
+ chdir:
+ type: path
+ description:
+ - Change into this directory before running the command.
+ responses:
+ type: dict
+ description:
+ - Mapping of expected string/regex and string to respond with. If the
+ response is a list, successive matches return successive
+ responses. List functionality is new in 2.1.
+ required: true
+ timeout:
+ type: int
+ description:
+ - Amount of time in seconds to wait for the expected strings. Use
+ C(null) to disable timeout.
+ default: 30
+ echo:
+ description:
+ - Whether or not to echo out your response strings.
+ default: false
+ type: bool
+requirements:
+ - python >= 2.6
+ - pexpect >= 3.3
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ support: full
+ platforms: posix
+notes:
+ - If you want to run a command through the shell (say you are using C(<),
+ C(>), C(|), and so on), you must specify a shell in the command such as
+ C(/bin/bash -c "/path/to/something | grep else").
+  - The question, or key, under I(responses) is a Python regex match. Case
+    insensitive searches are indicated with a prefix of C((?i)).
+  - The C(pexpect) library used by this module operates with a search window
+    of 2000 bytes, and does not use a multiline regex match. To perform a
+    start-of-line bound match, use a pattern like ``(?m)^pattern``.
+ - By default, if a question is encountered multiple times, its string
+ response will be repeated. If you need different responses for successive
+ question matches, instead of a string response, use a list of strings as
+ the response. The list functionality is new in 2.1.
+ - The M(ansible.builtin.expect) module is designed for simple scenarios.
+ For more complex needs, consider the use of expect code with the M(ansible.builtin.shell)
+ or M(ansible.builtin.script) modules. (An example is part of the M(ansible.builtin.shell) module documentation).
+seealso:
+- module: ansible.builtin.script
+- module: ansible.builtin.shell
+author: "Matt Martz (@sivel)"
+'''
+
+EXAMPLES = r'''
+- name: Case insensitive password string match
+ ansible.builtin.expect:
+ command: passwd username
+ responses:
+ (?i)password: "MySekretPa$$word"
+ # you don't want to show passwords in your logs
+ no_log: true
+
+- name: Generic question with multiple different responses
+ ansible.builtin.expect:
+ command: /path/to/custom/command
+ responses:
+ Question:
+ - response1
+ - response2
+ - response3
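+
+# Illustrative sketch (hypothetical command and prompt): disable the timeout
+# for a prompt that can take arbitrarily long to appear.
+- name: Answer a slow installer prompt with no timeout
+  ansible.builtin.expect:
+    command: /path/to/slow/installer
+    responses:
+      'Continue \[y/N\]': "y"
+    timeout: null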
+'''
+
+import datetime
+import os
+import traceback
+
+PEXPECT_IMP_ERR = None
+try:
+ import pexpect
+ HAS_PEXPECT = True
+except ImportError:
+ PEXPECT_IMP_ERR = traceback.format_exc()
+ HAS_PEXPECT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+def response_closure(module, question, responses):
+ resp_gen = (b'%s\n' % to_bytes(r).rstrip(b'\n') for r in responses)
+
+ def wrapped(info):
+ try:
+ return next(resp_gen)
+ except StopIteration:
+ module.fail_json(msg="No remaining responses for '%s', "
+ "output was '%s'" %
+ (question,
+ info['child_result_list'][-1]))
+
+ return wrapped
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=True),
+ chdir=dict(type='path'),
+ creates=dict(type='path'),
+ removes=dict(type='path'),
+ responses=dict(type='dict', required=True),
+ timeout=dict(type='int', default=30),
+ echo=dict(type='bool', default=False),
+ )
+ )
+
+ if not HAS_PEXPECT:
+ module.fail_json(msg=missing_required_lib("pexpect"),
+ exception=PEXPECT_IMP_ERR)
+
+ chdir = module.params['chdir']
+ args = module.params['command']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ responses = module.params['responses']
+ timeout = module.params['timeout']
+ echo = module.params['echo']
+
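+    # Build the 'events' mapping consumed by pexpect: each byte pattern maps to
+    # either a bytes reply or a callable; pexpect sends the reply when the
+    # pattern matches, and response_closure pops successive list responses.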
+ events = dict()
+ for key, value in responses.items():
+ if isinstance(value, list):
+ response = response_closure(module, key, value)
+ else:
+ response = b'%s\n' % to_bytes(value).rstrip(b'\n')
+
+ events[to_bytes(key)] = response
+
+ if args.strip() == '':
+ module.fail_json(rc=256, msg="no command given")
+
+ if chdir:
+ chdir = os.path.abspath(chdir)
+ os.chdir(chdir)
+
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ if os.path.exists(creates):
+ module.exit_json(
+ cmd=args,
+ stdout="skipped, since %s exists" % creates,
+ changed=False,
+ rc=0
+ )
+
+ if removes:
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
+ if not os.path.exists(removes):
+ module.exit_json(
+ cmd=args,
+ stdout="skipped, since %s does not exist" % removes,
+ changed=False,
+ rc=0
+ )
+
+ startd = datetime.datetime.now()
+
+ try:
+ try:
+ # Prefer pexpect.run from pexpect>=4
+ b_out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
+ events=events, cwd=chdir, echo=echo,
+ encoding=None)
+ except TypeError:
+ # Use pexpect._run in pexpect>=3.3,<4
+ # pexpect.run doesn't support `echo`
+ # pexpect.runu doesn't support encoding=None
+ b_out, rc = pexpect._run(args, timeout=timeout, withexitstatus=True,
+ events=events, extra_args=None, logfile=None,
+ cwd=chdir, env=None, _spawn=pexpect.spawn,
+ echo=echo)
+
+ except (TypeError, AttributeError) as e:
+ # This should catch all insufficient versions of pexpect
+ # We deem them insufficient for their lack of ability to specify
+ # to not echo responses via the run/runu functions, which would
+ # potentially leak sensitive information
+ module.fail_json(msg='Insufficient version of pexpect installed '
+ '(%s), this module requires pexpect>=3.3. '
+ 'Error was %s' % (pexpect.__version__, to_native(e)))
+ except pexpect.ExceptionPexpect as e:
+ module.fail_json(msg='%s' % to_native(e), exception=traceback.format_exc())
+
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ if b_out is None:
+ b_out = b''
+
+ result = dict(
+ cmd=args,
+ stdout=to_native(b_out).rstrip('\r\n'),
+ rc=rc,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ changed=True,
+ )
+
+ if rc is None:
+ module.fail_json(msg='command exceeded timeout', **result)
+ elif rc != 0:
+ module.fail_json(msg='non-zero return code', **result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/fail.py b/lib/ansible/modules/fail.py
new file mode 100644
index 0000000..8d3fa15
--- /dev/null
+++ b/lib/ansible/modules/fail.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: fail
+short_description: Fail with custom message
+description:
+- This module fails the play for the targeted host with a custom message.
+- It can be useful for bailing out when a certain condition is met using C(when).
+- This module is also supported for Windows targets.
+version_added: "0.8"
+options:
+ msg:
+ description:
+ - The customized message used for failing execution.
+ - If omitted, fail will simply bail out with a generic message.
+ type: str
+ default: Failed as requested from task
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ become:
+ support: none
+ bypass_host_loop:
+ support: none
+ connection:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ delegation:
+ details: Aside from C(register) and/or in combination with C(delegate_facts), it has little effect.
+ support: partial
+ platform:
+ platforms: all
+seealso:
+- module: ansible.builtin.assert
+- module: ansible.builtin.debug
+- module: ansible.builtin.meta
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Example using fail and when together
+ ansible.builtin.fail:
+ msg: The system may not be provisioned according to the CMDB status.
+ when: cmdb_status != "to-be-staged"
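+
+# Illustrative sketch (cmdb_status as above): omitting msg fails with the
+# default "Failed as requested from task" message.
+- name: Bail out with the default message
+  ansible.builtin.fail:
+  when: cmdb_status is not defined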
+'''
diff --git a/lib/ansible/modules/fetch.py b/lib/ansible/modules/fetch.py
new file mode 100644
index 0000000..646f78d
--- /dev/null
+++ b/lib/ansible/modules/fetch.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: fetch
+short_description: Fetch files from remote nodes
+description:
+- This module works like M(ansible.builtin.copy), but in reverse.
+- It is used for fetching files from remote machines and storing them locally in a file tree, organized by hostname.
+- Files that already exist at I(dest) will be overwritten if they are different than the I(src).
+- This module is also supported for Windows targets.
+version_added: '0.2'
+options:
+ src:
+ description:
+ - The file on the remote system to fetch.
+ - This I(must) be a file, not a directory.
+ - Recursive fetching may be supported in a later release.
+ required: yes
+ dest:
+ description:
+ - A directory to save the file into.
+    - For example, if the I(dest) directory is C(/backup), a I(src) file named C(/etc/profile) on host
+      C(host.example.com) would be saved into C(/backup/host.example.com/etc/profile).
+ The host name is based on the inventory name.
+ required: yes
+ fail_on_missing:
+ version_added: '1.1'
+ description:
+ - When set to C(true), the task will fail if the remote file cannot be read for any reason.
+ - Prior to Ansible 2.5, setting this would only fail if the source file was missing.
+ - The default was changed to C(true) in Ansible 2.5.
+ type: bool
+ default: yes
+ validate_checksum:
+ version_added: '1.4'
+ description:
+ - Verify that the source and destination checksums match after the files are fetched.
+ type: bool
+ default: yes
+ flat:
+ version_added: '1.2'
+ description:
+ - Allows you to override the default behavior of appending hostname/path/to/file to the destination.
+ - If C(dest) ends with '/', it will use the basename of the source file, similar to the copy module.
+ - This can be useful if working with a single host, or if retrieving files that are uniquely named per host.
+ - If using multiple hosts with the same filename, the file will be overwritten for each host.
+ type: bool
+ default: no
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.files
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix, windows
+ safe_file_operations:
+ support: none
+ vault:
+ support: none
+notes:
+- When running fetch with C(become), the M(ansible.builtin.slurp) module will also be
+ used to fetch the contents of the file for determining the remote
+ checksum. This effectively doubles the transfer size, and
+ depending on the file size can consume all available memory on the
+ remote or local hosts causing a C(MemoryError). Due to this it is
+ advisable to run this module without C(become) whenever possible.
+- Prior to Ansible 2.5 this module would not fail if reading the remote
+ file was impossible unless C(fail_on_missing) was set.
+- In Ansible 2.5 or later, playbook authors are encouraged to use
+  C(failed_when) or C(ignore_errors) to get this ability. They may
+ also explicitly set C(fail_on_missing) to C(false) to get the
+ non-failing behaviour.
+seealso:
+- module: ansible.builtin.copy
+- module: ansible.builtin.slurp
+author:
+- Ansible Core Team
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Store file into /tmp/fetched/host.example.com/tmp/somefile
+ ansible.builtin.fetch:
+ src: /tmp/somefile
+ dest: /tmp/fetched
+
+- name: Specifying a path directly
+ ansible.builtin.fetch:
+ src: /tmp/somefile
+ dest: /tmp/prefix-{{ inventory_hostname }}
+ flat: yes
+
+- name: Specifying a destination path
+ ansible.builtin.fetch:
+ src: /tmp/uniquefile
+ dest: /tmp/special/
+ flat: yes
+
+- name: Storing in a path relative to the playbook
+ ansible.builtin.fetch:
+ src: /tmp/uniquefile
+ dest: special/prefix-{{ inventory_hostname }}
+ flat: yes
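+
+# Illustrative sketch (hypothetical path): tolerate a file that may be absent
+# on some hosts, per the fail_on_missing documentation above.
+- name: Fetch a log that only exists on some hosts
+  ansible.builtin.fetch:
+    src: /var/log/app/optional.log
+    dest: /tmp/fetched
+    fail_on_missing: no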
+'''
diff --git a/lib/ansible/modules/file.py b/lib/ansible/modules/file.py
new file mode 100644
index 0000000..72b510c
--- /dev/null
+++ b/lib/ansible/modules/file.py
@@ -0,0 +1,987 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: file
+version_added: historical
+short_description: Manage files and file properties
+extends_documentation_fragment: [files, action_common_attributes]
+description:
+- Set attributes of files, directories, or symlinks and their targets.
+- Alternatively, remove files, symlinks or directories.
+- Many other modules support the same options as the C(file) module - including M(ansible.builtin.copy),
+ M(ansible.builtin.template), and M(ansible.builtin.assemble).
+- For Windows targets, use the M(ansible.windows.win_file) module instead.
+options:
+ path:
+ description:
+ - Path to the file being managed.
+ type: path
+ required: yes
+ aliases: [ dest, name ]
+ state:
+ description:
+ - If C(absent), directories will be recursively deleted, and files or symlinks will
+ be unlinked. In the case of a directory, if C(diff) is declared, you will see the files and folders deleted listed
+      under C(path_content). Note that C(absent) will not cause C(file) to fail if the C(path) does
+ not exist as the state did not change.
+ - If C(directory), all intermediate subdirectories will be created if they
+ do not exist. Since Ansible 1.7 they will be created with the supplied permissions.
+ - If C(file), with no other options, returns the current state of C(path).
+ - If C(file), even with other options (such as C(mode)), the file will be modified if it exists but will NOT be created if it does not exist.
+ Set to C(touch) or use the M(ansible.builtin.copy) or M(ansible.builtin.template) module if you want to create the file if it does not exist.
+ - If C(hard), the hard link will be created or changed.
+ - If C(link), the symbolic link will be created or changed.
+ - If C(touch) (new in 1.4), an empty file will be created if the file does not
+ exist, while an existing file or directory will receive updated file access and
+ modification times (similar to the way C(touch) works from the command line).
+ - Default is the current state of the file if it exists, C(directory) if C(recurse=yes), or C(file) otherwise.
+ type: str
+ choices: [ absent, directory, file, hard, link, touch ]
+ src:
+ description:
+ - Path of the file to link to.
+ - This applies only to C(state=link) and C(state=hard).
+ - For C(state=link), this will also accept a non-existing path.
+ - Relative paths are relative to the file being created (C(path)) which is how
+ the Unix command C(ln -s SRC DEST) treats relative paths.
+ type: path
+ recurse:
+ description:
+ - Recursively set the specified file attributes on directory contents.
+ - This applies only when C(state) is set to C(directory).
+ type: bool
+ default: no
+ version_added: '1.1'
+ force:
+ description:
+ - >
+ Force the creation of the symlinks in two cases: the source file does
+      not exist (but will appear later); the destination exists and is a file (so we need to unlink the
+      C(path) file and create a symlink to the C(src) file in its place).
+ type: bool
+ default: no
+ follow:
+ description:
+ - This flag indicates that filesystem links, if they exist, should be followed.
+ - I(follow=yes) and I(state=link) can modify I(src) when combined with parameters such as I(mode).
+ - Previous to Ansible 2.5, this was C(false) by default.
+ type: bool
+ default: yes
+ version_added: '1.8'
+ modification_time:
+ description:
+ - This parameter indicates the time the file's modification time should be set to.
+ - Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
+    - Default is C(None), meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
+ type: str
+ version_added: "2.7"
+ modification_time_format:
+ description:
+ - When used with C(modification_time), indicates the time format that must be used.
+ - Based on default Python format (see time.strftime doc).
+ type: str
+ default: "%Y%m%d%H%M.%S"
+ version_added: '2.7'
+ access_time:
+ description:
+ - This parameter indicates the time the file's access time should be set to.
+ - Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
+    - Default is C(None), meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
+ type: str
+ version_added: '2.7'
+ access_time_format:
+ description:
+ - When used with C(access_time), indicates the time format that must be used.
+ - Based on default Python format (see time.strftime doc).
+ type: str
+ default: "%Y%m%d%H%M.%S"
+ version_added: '2.7'
+seealso:
+- module: ansible.builtin.assemble
+- module: ansible.builtin.copy
+- module: ansible.builtin.stat
+- module: ansible.builtin.template
+- module: ansible.windows.win_file
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ details: permissions and ownership will be shown but file contents on absent/touch will not.
+ support: partial
+ platform:
+ platforms: posix
+author:
+- Ansible Core Team
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Change file ownership, group and permissions
+ ansible.builtin.file:
+ path: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: '0644'
+
+- name: Give insecure permissions to an existing file
+ ansible.builtin.file:
+ path: /work
+ owner: root
+ group: root
+ mode: '1777'
+
+- name: Create a symbolic link
+ ansible.builtin.file:
+ src: /file/to/link/to
+ dest: /path/to/symlink
+ owner: foo
+ group: foo
+ state: link
+
+- name: Create two hard links
+ ansible.builtin.file:
+ src: '/tmp/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ state: hard
+ loop:
+ - { src: x, dest: y }
+ - { src: z, dest: k }
+
+- name: Touch a file, using symbolic modes to set the permissions (equivalent to 0644)
+ ansible.builtin.file:
+ path: /etc/foo.conf
+ state: touch
+ mode: u=rw,g=r,o=r
+
+- name: Touch the same file, but add/remove some permissions
+ ansible.builtin.file:
+ path: /etc/foo.conf
+ state: touch
+ mode: u+rw,g-wx,o-rwx
+
+- name: Touch the same file again, but do not change times; this makes the task idempotent
+ ansible.builtin.file:
+ path: /etc/foo.conf
+ state: touch
+ mode: u+rw,g-wx,o-rwx
+ modification_time: preserve
+ access_time: preserve
+
+- name: Create a directory if it does not exist
+ ansible.builtin.file:
+ path: /etc/some_directory
+ state: directory
+ mode: '0755'
+
+- name: Update modification and access time of given file
+ ansible.builtin.file:
+ path: /etc/some_file
+ state: file
+ modification_time: now
+ access_time: now
+
+- name: Set access time based on seconds from epoch value
+ ansible.builtin.file:
+ path: /etc/another_file
+ state: file
+ access_time: '{{ "%Y%m%d%H%M.%S" | strftime(stat_var.stat.atime) }}'
+
+- name: Recursively change ownership of a directory
+ ansible.builtin.file:
+ path: /etc/foo
+ state: directory
+ recurse: yes
+ owner: foo
+ group: foo
+
+- name: Remove file (delete file)
+ ansible.builtin.file:
+ path: /etc/foo.txt
+ state: absent
+
+- name: Recursively remove directory
+ ansible.builtin.file:
+ path: /etc/foo
+ state: absent
+
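+# Illustrative sketch: pin an explicit modification time using the default
+# %Y%m%d%H%M.%S format described under modification_time_format.
+- name: Pin a file's modification time
+  ansible.builtin.file:
+    path: /etc/some_file
+    state: file
+    modification_time: '202301011230.00'
+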
+'''
+RETURN = r'''
+dest:
+ description: Destination file/path, equal to the value passed to I(path).
+ returned: state=touch, state=hard, state=link
+ type: str
+ sample: /path/to/file.txt
+path:
+ description: Destination file/path, equal to the value passed to I(path).
+ returned: state=absent, state=directory, state=file
+ type: str
+ sample: /path/to/file.txt
+'''
+
+import errno
+import os
+import shutil
+import sys
+import time
+
+from pwd import getpwnam, getpwuid
+from grp import getgrnam, getgrgid
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+# There will only be a single AnsibleModule object per module
+module = None
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return 'AnsibleModuleError(results={0})'.format(self.results)
+
+
+class ParameterError(AnsibleModuleError):
+ pass
+
+
+class Sentinel(object):
+ def __new__(cls, *args, **kwargs):
+ return cls
+
+
+def _ansible_excepthook(exc_type, exc_value, tb):
+ # Using an exception allows us to catch it if the calling code knows it can recover
+ if issubclass(exc_type, AnsibleModuleError):
+ module.fail_json(**exc_value.results)
+ else:
+ sys.__excepthook__(exc_type, exc_value, tb)
+
+
+def additional_parameter_handling(params):
+ """Additional parameter validation and reformatting"""
+ # When path is a directory, rewrite the pathname to be the file inside of the directory
+ # TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch?
+ # I think this is where we want to be in the future:
+ # when isdir(path):
+ # if state == absent: Remove the directory
+ # if state == touch: Touch the directory
+ # if state == directory: Assert the directory is the same as the one specified
+ # if state == file: place inside of the directory (use _original_basename)
+ # if state == link: place inside of the directory (use _original_basename. Fallback to src?)
+ # if state == hard: place inside of the directory (use _original_basename. Fallback to src?)
+ if (params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict'))):
+ basename = None
+
+ if params['_original_basename']:
+ basename = params['_original_basename']
+ elif params['src']:
+ basename = os.path.basename(params['src'])
+
+ if basename:
+ params['path'] = os.path.join(params['path'], basename)
+
+ # state should default to file, but since that creates many conflicts,
+ # default state to 'current' when it exists.
+ prev_state = get_state(to_bytes(params['path'], errors='surrogate_or_strict'))
+
+ if params['state'] is None:
+ if prev_state != 'absent':
+ params['state'] = prev_state
+ elif params['recurse']:
+ params['state'] = 'directory'
+ else:
+ params['state'] = 'file'
+
+ # make sure the target path is a directory when we're doing a recursive operation
+ if params['recurse'] and params['state'] != 'directory':
+ raise ParameterError(results={"msg": "recurse option requires state to be 'directory'",
+ "path": params["path"]})
+
+ # Fail if 'src' but no 'state' is specified
+ if params['src'] and params['state'] not in ('link', 'hard'):
+ raise ParameterError(results={'msg': "src option requires state to be 'link' or 'hard'",
+ 'path': params['path']})
+
+
+def get_state(path):
+ ''' Find out current state '''
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ try:
+ if os.path.lexists(b_path):
+ if os.path.islink(b_path):
+ return 'link'
+ elif os.path.isdir(b_path):
+ return 'directory'
+ elif os.stat(b_path).st_nlink > 1:
+ return 'hard'
+
+ # could be many other things, but defaulting to file
+ return 'file'
+
+ return 'absent'
+ except OSError as e:
+ if e.errno == errno.ENOENT: # It may already have been removed
+ return 'absent'
+ else:
+ raise
+
+
+# This should be moved into the common file utilities
+def recursive_set_attributes(b_path, follow, file_args, mtime, atime):
+ changed = False
+
+ try:
+ for b_root, b_dirs, b_files in os.walk(b_path):
+ for b_fsobj in b_dirs + b_files:
+ b_fsname = os.path.join(b_root, b_fsobj)
+ if not os.path.islink(b_fsname):
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
+ changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
+ changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
+
+ else:
+ # Change perms on the link
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
+ changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
+ changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
+
+ if follow:
+ b_fsname = os.path.join(b_root, os.readlink(b_fsname))
+ # The link target could be nonexistent
+ if os.path.exists(b_fsname):
+ if os.path.isdir(b_fsname):
+ # Link is a directory so change perms on the directory's contents
+ changed |= recursive_set_attributes(b_fsname, follow, file_args, mtime, atime)
+
+ # Change perms on the file pointed to by the link
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
+ changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
+ changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
+ except RuntimeError as e:
+ # on Python3 "RecursionError" is raised which is derived from "RuntimeError"
+        # TODO once this function is moved into the common file utilities, this should probably raise a more general exception
+ raise AnsibleModuleError(
+ results={'msg': "Could not recursively set attributes on %s. Original error was: '%s'" % (to_native(b_path), to_native(e))}
+ )
+
+ return changed
+
+
+def initial_diff(path, state, prev_state):
+ diff = {'before': {'path': path},
+ 'after': {'path': path},
+ }
+
+ if prev_state != state:
+ diff['before']['state'] = prev_state
+ diff['after']['state'] = state
+ if state == 'absent' and prev_state == 'directory':
+ walklist = {
+ 'directories': [],
+ 'files': [],
+ }
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ for base_path, sub_folders, files in os.walk(b_path):
+ for folder in sub_folders:
+ folderpath = os.path.join(base_path, folder)
+ walklist['directories'].append(folderpath)
+
+ for filename in files:
+ filepath = os.path.join(base_path, filename)
+ walklist['files'].append(filepath)
+
+ diff['before']['path_content'] = walklist
+
+ return diff
+
+#
+# States
+#
+
+
+def get_timestamp_for_time(formatted_time, time_format):
+ if formatted_time == 'preserve':
+ return None
+ elif formatted_time == 'now':
+ return Sentinel
+ else:
+ try:
+ struct = time.strptime(formatted_time, time_format)
+ struct_time = time.mktime(struct)
+ except (ValueError, OverflowError) as e:
+ raise AnsibleModuleError(results={'msg': 'Error while obtaining timestamp for time %s using format %s: %s'
+ % (formatted_time, time_format, to_native(e, nonstring='simplerepr'))})
+
+ return struct_time
+
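+# Added note (illustration, not original code): get_timestamp_for_time maps
+# 'preserve' -> None (leave timestamps alone), 'now' -> Sentinel (use current
+# time), and e.g. '202301011230.00' with '%Y%m%d%H%M.%S' -> epoch seconds.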
+
+def update_timestamp_for_file(path, mtime, atime, diff=None):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ try:
+ # When mtime and atime are set to 'now', rely on utime(path, None) which does not require ownership of the file
+ # https://github.com/ansible/ansible/issues/50943
+ if mtime is Sentinel and atime is Sentinel:
+ # It's not exact but we can't rely on os.stat(path).st_mtime after setting os.utime(path, None) as it may
+ # not be updated. Just use the current time for the diff values
+ mtime = atime = time.time()
+
+ previous_mtime = os.stat(b_path).st_mtime
+ previous_atime = os.stat(b_path).st_atime
+
+ set_time = None
+ else:
+            # If both parameters are None ('preserve'), there is nothing to do
+ if mtime is None and atime is None:
+ return False
+
+ previous_mtime = os.stat(b_path).st_mtime
+ previous_atime = os.stat(b_path).st_atime
+
+ if mtime is None:
+ mtime = previous_mtime
+ elif mtime is Sentinel:
+ mtime = time.time()
+
+ if atime is None:
+ atime = previous_atime
+ elif atime is Sentinel:
+ atime = time.time()
+
+ # If both timestamps are already ok, nothing to do
+ if mtime == previous_mtime and atime == previous_atime:
+ return False
+
+ set_time = (atime, mtime)
+
+ if not module.check_mode:
+ os.utime(b_path, set_time)
+
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ if 'after' not in diff:
+ diff['after'] = {}
+ if mtime != previous_mtime:
+ diff['before']['mtime'] = previous_mtime
+ diff['after']['mtime'] = mtime
+ if atime != previous_atime:
+ diff['before']['atime'] = previous_atime
+ diff['after']['atime'] = atime
+ except OSError as e:
+ raise AnsibleModuleError(results={'msg': 'Error while updating modification or access time: %s'
+ % to_native(e, nonstring='simplerepr'), 'path': path})
+ return True
+
+
+def keep_backward_compatibility_on_timestamps(parameter, state):
+ if state in ['file', 'hard', 'directory', 'link'] and parameter is None:
+ return 'preserve'
+ elif state == 'touch' and parameter is None:
+ return 'now'
+ else:
+ return parameter
+
+
+def execute_diff_peek(path):
+ """Take a guess as to whether a file is a binary file"""
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ appears_binary = False
+ try:
+ with open(b_path, 'rb') as f:
+ head = f.read(8192)
+ except Exception:
+ # If we can't read the file, we're okay assuming it's text
+ pass
+ else:
+ if b"\x00" in head:
+ appears_binary = True
+
+ return appears_binary
+
+
+def ensure_absent(path):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ result = {}
+
+ if prev_state != 'absent':
+ diff = initial_diff(path, 'absent', prev_state)
+
+ if not module.check_mode:
+ if prev_state == 'directory':
+ try:
+ shutil.rmtree(b_path, ignore_errors=False)
+ except Exception as e:
+ raise AnsibleModuleError(results={'msg': "rmtree failed: %s" % to_native(e)})
+ else:
+ try:
+ os.unlink(b_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT: # It may already have been removed
+ raise AnsibleModuleError(results={'msg': "unlinking failed: %s " % to_native(e),
+ 'path': path})
+
+ result.update({'path': path, 'changed': True, 'diff': diff, 'state': 'absent'})
+ else:
+ result.update({'path': path, 'changed': False, 'state': 'absent'})
+
+ return result
+
+
+def execute_touch(path, follow, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ changed = False
+ result = {'dest': path}
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ # If the file did not already exist
+ if prev_state == 'absent':
+ # if we are in check mode and the file is absent
+ # we can set the changed status to True and return
+ if module.check_mode:
+ result['changed'] = True
+ return result
+ # Create an empty file
+ try:
+ open(b_path, 'wb').close()
+ changed = True
+ except (OSError, IOError) as e:
+ raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+ # Update the attributes on the file
+ diff = initial_diff(path, 'touch', prev_state)
+ file_args = module.load_file_common_arguments(module.params)
+ try:
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ except SystemExit as e:
+ if e.code: # this is the exit code passed to sys.exit, not a constant -- pylint: disable=using-constant-test
+ # We take this to mean that fail_json() was called from
+ # somewhere in basic.py
+ if prev_state == 'absent':
+ # If we just created the file we can safely remove it
+ os.remove(b_path)
+ raise
+
+ result['changed'] = changed
+ result['diff'] = diff
+ return result
+
+
+def ensure_file_attributes(path, follow, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ file_args = module.load_file_common_arguments(module.params)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ if prev_state != 'file':
+ if follow and prev_state == 'link':
+ # follow symlink and operate on original
+ b_path = os.path.realpath(b_path)
+ path = to_native(b_path, errors='strict')
+ prev_state = get_state(b_path)
+ file_args['path'] = path
+
+ if prev_state not in ('file', 'hard'):
+ # file is not absent and any other state is a conflict
+ raise AnsibleModuleError(results={'msg': 'file (%s) is %s, cannot continue' % (path, prev_state),
+ 'path': path, 'state': prev_state})
+
+ diff = initial_diff(path, 'file', prev_state)
+ changed = module.set_fs_attributes_if_different(file_args, False, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ return {'path': path, 'changed': changed, 'diff': diff}
+
+
+def ensure_directory(path, follow, recurse, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ file_args = module.load_file_common_arguments(module.params)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ # For followed symlinks, we need to operate on the target of the link
+ if follow and prev_state == 'link':
+ b_path = os.path.realpath(b_path)
+ path = to_native(b_path, errors='strict')
+ file_args['path'] = path
+ prev_state = get_state(b_path)
+
+ changed = False
+ diff = initial_diff(path, 'directory', prev_state)
+
+ if prev_state == 'absent':
+ # Create directory and assign permissions to it
+ if module.check_mode:
+ return {'path': path, 'changed': True, 'diff': diff}
+ curpath = ''
+
+ try:
+ # Split the path so we can apply filesystem attributes recursively
+ # from the root (/) directory for absolute paths or the base path
+ # of a relative path. We can then walk the appropriate directory
+ # path to apply attributes.
+ # Something like mkdir -p with mode applied to all of the newly created directories
+ for dirname in path.strip('/').split('/'):
+ curpath = '/'.join([curpath, dirname])
+ # Remove leading slash if we're creating a relative path
+ if not os.path.isabs(path):
+ curpath = curpath.lstrip('/')
+ b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
+ if not os.path.exists(b_curpath):
+ try:
+ os.mkdir(b_curpath)
+ changed = True
+ except OSError as ex:
+ # Possibly something else created the dir since the os.path.exists
+ # check above. As long as it's a dir, we don't need to error out.
+ if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
+ raise
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = curpath
+ changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ except Exception as e:
+ raise AnsibleModuleError(results={'msg': 'There was an issue creating %s as requested:'
+ ' %s' % (curpath, to_native(e)),
+ 'path': path})
+ return {'path': path, 'changed': changed, 'diff': diff}
+
+ elif prev_state != 'directory':
+ # We already know prev_state is not 'absent', therefore it exists in some form.
+ raise AnsibleModuleError(results={'msg': '%s already exists as a %s' % (path, prev_state),
+ 'path': path})
+
+ #
+ # previous state == directory
+ #
+
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ if recurse:
+ changed |= recursive_set_attributes(b_path, follow, file_args, mtime, atime)
+
+ return {'path': path, 'changed': changed, 'diff': diff}
+
+
+def ensure_symlink(path, src, follow, force, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+    # src is either the target of the symlink, or informational data passed along from the template
+    # or copy module; even if this module never uses it, it is needed to key off some things
+ if src is None:
+ if follow and os.path.exists(b_path):
+ # use the current target of the link as the source
+ src = to_native(os.readlink(b_path), errors='strict')
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+
+ if not os.path.islink(b_path) and os.path.isdir(b_path):
+ relpath = path
+ else:
+ b_relpath = os.path.dirname(b_path)
+ relpath = to_native(b_relpath, errors='strict')
+
+ # If src is None that means we are expecting to update an existing link.
+ if src is None:
+ absrc = None
+ else:
+ absrc = os.path.join(relpath, src)
+
+ b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
+ if not force and src is not None and not os.path.exists(b_absrc):
+ raise AnsibleModuleError(results={'msg': 'src file does not exist, use "force=yes" if you'
+ ' really want to create the link: %s' % absrc,
+ 'path': path, 'src': src})
+
+ if prev_state == 'directory':
+ if not force:
+ raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
+ % (prev_state, path),
+ 'path': path})
+ elif os.listdir(b_path):
+ # refuse to replace a directory that has files in it
+ raise AnsibleModuleError(results={'msg': 'the directory %s is not empty, refusing to'
+ ' convert it' % path,
+ 'path': path})
+ elif prev_state in ('file', 'hard') and not force:
+ raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
+ % (prev_state, path),
+ 'path': path})
+
+ diff = initial_diff(path, 'link', prev_state)
+ changed = False
+
+ if prev_state in ('hard', 'file', 'directory', 'absent'):
+ if src is None:
+ raise AnsibleModuleError(results={'msg': 'src is required for creating new symlinks'})
+ changed = True
+ elif prev_state == 'link':
+ if src is not None:
+ b_old_src = os.readlink(b_path)
+ if b_old_src != b_src:
+ diff['before']['src'] = to_native(b_old_src, errors='strict')
+ diff['after']['src'] = src
+ changed = True
+ else:
+ raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
+
+ if changed and not module.check_mode:
+ if prev_state != 'absent':
+ # try to replace atomically
+ b_tmppath = to_bytes(os.path.sep).join(
+ [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
+ )
+ try:
+ if prev_state == 'directory':
+ os.rmdir(b_path)
+ os.symlink(b_src, b_tmppath)
+ os.rename(b_tmppath, b_path)
+ except OSError as e:
+ if os.path.exists(b_tmppath):
+ os.unlink(b_tmppath)
+ raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+ else:
+ try:
+ os.symlink(b_src, b_path)
+ except OSError as e:
+ raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+
+ if module.check_mode and not os.path.exists(b_path):
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+ # Now that we might have created the symlink, get the arguments.
+ # We need to do it now so we can properly follow the symlink if needed
+ # because load_file_common_arguments sets 'path' according
+ # the value of follow and the symlink existence.
+ file_args = module.load_file_common_arguments(module.params)
+
+ # Whenever we create a link to a nonexistent target we know that the nonexistent target
+ # cannot have any permissions set on it. Skip setting those and emit a warning (the user
+ # can set follow=False to remove the warning)
+ if follow and os.path.islink(b_path) and not os.path.exists(file_args['path']):
+ module.warn('Cannot set fs attributes on a non-existent symlink target. follow should be'
+ ' set to False to avoid this.')
+ else:
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+
+def ensure_hardlink(path, src, follow, force, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ file_args = module.load_file_common_arguments(module.params)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ # src is the source of a hardlink. We require it if we are creating a new hardlink.
+ # We require path in the argument_spec so we know it is present at this point.
+ if prev_state != 'hard' and src is None:
+ raise AnsibleModuleError(results={'msg': 'src is required for creating new hardlinks'})
+
+ # Even if the link already exists, if src was specified it needs to exist.
+ # The inode number will be compared to ensure the link has the correct target.
+ if src is not None and not os.path.exists(b_src):
+ raise AnsibleModuleError(results={'msg': 'src does not exist', 'dest': path, 'src': src})
+
+ diff = initial_diff(path, 'hard', prev_state)
+ changed = False
+
+ if prev_state == 'absent':
+ changed = True
+ elif prev_state == 'link':
+ b_old_src = os.readlink(b_path)
+ if b_old_src != b_src:
+ diff['before']['src'] = to_native(b_old_src, errors='strict')
+ diff['after']['src'] = src
+ changed = True
+ elif prev_state == 'hard':
+ if src is not None and not os.stat(b_path).st_ino == os.stat(b_src).st_ino:
+ changed = True
+ if not force:
+ raise AnsibleModuleError(results={'msg': 'Cannot link, different hard link exists at destination',
+ 'dest': path, 'src': src})
+ elif prev_state == 'file':
+ changed = True
+ if not force:
+ raise AnsibleModuleError(results={'msg': 'Cannot link, %s exists at destination' % prev_state,
+ 'dest': path, 'src': src})
+ elif prev_state == 'directory':
+ changed = True
+ if os.path.exists(b_path):
+ if os.stat(b_path).st_ino == os.stat(b_src).st_ino:
+ return {'path': path, 'changed': False}
+ elif not force:
+ raise AnsibleModuleError(results={'msg': 'Cannot link: different hard link exists at destination',
+ 'dest': path, 'src': src})
+ else:
+ raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
+
+ if changed and not module.check_mode:
+ if prev_state != 'absent':
+ # try to replace atomically
+ b_tmppath = to_bytes(os.path.sep).join(
+ [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
+ )
+ try:
+ if prev_state == 'directory':
+ if os.path.exists(b_path):
+ try:
+ os.unlink(b_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT: # It may already have been removed
+ raise
+ os.link(b_src, b_tmppath)
+ os.rename(b_tmppath, b_path)
+ except OSError as e:
+ if os.path.exists(b_tmppath):
+ os.unlink(b_tmppath)
+ raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+ else:
+ try:
+ os.link(b_src, b_path)
+ except OSError as e:
+ raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+
+ if module.check_mode and not os.path.exists(b_path):
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+
+def check_owner_exists(module, owner):
+ try:
+ uid = int(owner)
+ try:
+ getpwuid(uid).pw_name
+ except KeyError:
+            module.warn('failed to look up user with uid %s. Make sure the user is created before this point in a real play' % uid)
+ except ValueError:
+ try:
+ getpwnam(owner).pw_uid
+ except KeyError:
+            module.warn('failed to look up user %s. Make sure the user is created before this point in a real play' % owner)
+
+
+def check_group_exists(module, group):
+ try:
+ gid = int(group)
+ try:
+ getgrgid(gid).gr_name
+ except KeyError:
+            module.warn('failed to look up group with gid %s. Make sure the group is created before this point in a real play' % gid)
+ except ValueError:
+ try:
+ getgrnam(group).gr_gid
+ except KeyError:
+            module.warn('failed to look up group %s. Make sure the group is created before this point in a real play' % group)
+
+
+def main():
+
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', choices=['absent', 'directory', 'file', 'hard', 'link', 'touch']),
+ path=dict(type='path', required=True, aliases=['dest', 'name']),
+ _original_basename=dict(type='str'), # Internal use only, for recursive ops
+ recurse=dict(type='bool', default=False),
+ force=dict(type='bool', default=False), # Note: Should not be in file_common_args in future
+ follow=dict(type='bool', default=True), # Note: Different default than file_common_args
+ _diff_peek=dict(type='bool'), # Internal use only, for internal checks in the action plugins
+ src=dict(type='path'), # Note: Should not be in file_common_args in future
+ modification_time=dict(type='str'),
+ modification_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
+ access_time=dict(type='str'),
+ access_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # When we rewrite basic.py, we will do something similar to this on instantiating an AnsibleModule
+ sys.excepthook = _ansible_excepthook
+ additional_parameter_handling(module.params)
+ params = module.params
+
+ state = params['state']
+ recurse = params['recurse']
+ force = params['force']
+ follow = params['follow']
+ path = params['path']
+ src = params['src']
+
+ if module.check_mode and state != 'absent':
+ file_args = module.load_file_common_arguments(module.params)
+ if file_args['owner']:
+ check_owner_exists(module, file_args['owner'])
+ if file_args['group']:
+ check_group_exists(module, file_args['group'])
+
+ timestamps = {}
+ timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
+ timestamps['modification_time_format'] = params['modification_time_format']
+ timestamps['access_time'] = keep_backward_compatibility_on_timestamps(params['access_time'], state)
+ timestamps['access_time_format'] = params['access_time_format']
+
+ # short-circuit for diff_peek
+ if params['_diff_peek'] is not None:
+ appears_binary = execute_diff_peek(to_bytes(path, errors='surrogate_or_strict'))
+ module.exit_json(path=path, changed=False, appears_binary=appears_binary)
+
+ if state == 'file':
+ result = ensure_file_attributes(path, follow, timestamps)
+ elif state == 'directory':
+ result = ensure_directory(path, follow, recurse, timestamps)
+ elif state == 'link':
+ result = ensure_symlink(path, src, follow, force, timestamps)
+ elif state == 'hard':
+ result = ensure_hardlink(path, src, follow, force, timestamps)
+ elif state == 'touch':
+ result = execute_touch(path, follow, timestamps)
+ elif state == 'absent':
+ result = ensure_absent(path)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/find.py b/lib/ansible/modules/find.py
new file mode 100644
index 0000000..b13c841
--- /dev/null
+++ b/lib/ansible/modules/find.py
@@ -0,0 +1,534 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
+# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
+# Copyright: (c) 2016-2017, Konstantin Shalygin <k0ste@k0ste.ru>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: find
+author: Brian Coca (@bcoca)
+version_added: "2.0"
+short_description: Return a list of files based on specific criteria
+description:
+ - Return a list of files based on specific criteria. Multiple criteria are AND'd together.
+ - For Windows targets, use the M(ansible.windows.win_find) module instead.
+options:
+ age:
+ description:
+ - Select files whose age is equal to or greater than the specified time.
+ - Use a negative age to find files equal to or less than the specified time.
+ - You can choose seconds, minutes, hours, days, or weeks by specifying the
+ first letter of any of those words (e.g., "1w").
+ type: str
+ patterns:
+ default: []
+ description:
+ - One or more (shell or regex) patterns, whose type is controlled by the C(use_regex) option.
+ - The patterns restrict the list of files to be returned to those whose basenames match at
+ least one of the patterns specified. Multiple patterns can be specified using a list.
+ - The pattern is matched against the file base name, excluding the directory.
+ - When using regexes, the pattern MUST match the ENTIRE file name, not just parts of it. So
+ if you are looking to match all files ending in .default, you'd need to use C(.*\.default)
+ as a regexp and not just C(\.default).
+ - This parameter expects a list, which can be either comma separated or YAML. If any of the
+ patterns contain a comma, make sure to put them in a list to avoid splitting the patterns
+ in undesirable ways.
+ - Defaults to C(*) when I(use_regex=False), or C(.*) when I(use_regex=True).
+ type: list
+ aliases: [ pattern ]
+ elements: str
+ excludes:
+ description:
+ - One or more (shell or regex) patterns, whose type is controlled by the I(use_regex) option.
+ - Items whose basenames match an I(excludes) pattern are culled from I(patterns) matches.
+ Multiple patterns can be specified using a list.
+ type: list
+ aliases: [ exclude ]
+ version_added: "2.5"
+ elements: str
+ contains:
+ description:
+ - A regular expression or pattern which should be matched against the file content.
+ - Works only when I(file_type) is C(file).
+ type: str
+ read_whole_file:
+ description:
+ - When doing a C(contains) search, determines whether the whole file should be read into
+ memory or if the regex should be applied to the file line-by-line.
+ - Setting this to C(true) can have performance and memory implications for large files.
+ - This uses C(re.search()) instead of C(re.match()).
+ type: bool
+ default: false
+ version_added: "2.11"
+ paths:
+ description:
+ - List of paths of directories to search. All paths must be fully qualified.
+ type: list
+ required: true
+ aliases: [ name, path ]
+ elements: str
+ file_type:
+ description:
+ - Type of file to select.
+ - The 'link' and 'any' choices were added in Ansible 2.3.
+ type: str
+ choices: [ any, directory, file, link ]
+ default: file
+ recurse:
+ description:
+ - If target is a directory, recursively descend into the directory looking for files.
+ type: bool
+ default: no
+ size:
+ description:
+ - Select files whose size is equal to or greater than the specified size.
+ - Use a negative size to find files equal to or less than the specified size.
+ - Unqualified values are in bytes but b, k, m, g, and t can be appended to specify
+ bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
+ - Size is not evaluated for directories.
+ type: str
+ age_stamp:
+ description:
+ - Choose the file property against which we compare age.
+ type: str
+ choices: [ atime, ctime, mtime ]
+ default: mtime
+ hidden:
+ description:
+ - Set this to C(true) to include hidden files, otherwise they will be ignored.
+ type: bool
+ default: no
+ follow:
+ description:
+ - Set this to C(true) to follow symlinks in the paths being searched; requires Python 2.6+ on the target.
+ type: bool
+ default: no
+ get_checksum:
+ description:
+ - Set this to C(true) to retrieve a file's SHA1 checksum.
+ type: bool
+ default: no
+ use_regex:
+ description:
+ - If C(false), the patterns are file globs (shell).
+ - If C(true), they are python regexes.
+ type: bool
+ default: no
+ depth:
+ description:
+ - Set the maximum number of levels to descend into.
+ - Setting I(recurse) to C(false) overrides this value, effectively limiting the depth to 1.
+ - Default is unlimited depth.
+ type: int
+ version_added: "2.6"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ details: since this action does not modify the target, it simply executes normally during check mode
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+- module: ansible.windows.win_find
+'''
+
+
+EXAMPLES = r'''
+- name: Recursively find /tmp files older than 2 days
+ ansible.builtin.find:
+ paths: /tmp
+ age: 2d
+ recurse: yes
+
+- name: Recursively find /tmp files older than 4 weeks and equal to or greater than 1 megabyte
+ ansible.builtin.find:
+ paths: /tmp
+ age: 4w
+ size: 1m
+ recurse: yes
+
+- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
+ ansible.builtin.find:
+ paths: /var/tmp
+ age: 3600
+ age_stamp: atime
+ recurse: yes
+
+- name: Find /var/log files equal to or greater than 10 megabytes ending with .old or .log.gz
+ ansible.builtin.find:
+ paths: /var/log
+ patterns: '*.old,*.log.gz'
+ size: 10m
+
+# Note that YAML double quotes require escaping backslashes, but YAML single quotes do not.
+- name: Find /var/log files equal to or greater than 10 megabytes ending with .old or .log.gz via regex
+ ansible.builtin.find:
+ paths: /var/log
+ patterns: "^.*?\\.(?:old|log\\.gz)$"
+ size: 10m
+ use_regex: yes
+
+- name: Find all directories in /var/log, excluding nginx and mysql
+ ansible.builtin.find:
+ paths: /var/log
+ recurse: no
+ file_type: directory
+ excludes: 'nginx,mysql'
+
+# When using patterns that contain a comma, make sure they are formatted as lists to avoid splitting the pattern
+- name: Use a single pattern that contains a comma formatted as a list
+ ansible.builtin.find:
+ paths: /var/log
+ file_type: file
+ use_regex: yes
+ patterns: ['^_[0-9]{2,4}_.*.log$']
+
+- name: Use multiple patterns that contain a comma formatted as a YAML list
+ ansible.builtin.find:
+ paths: /var/log
+ file_type: file
+ use_regex: yes
+ patterns:
+ - '^_[0-9]{2,4}_.*.log$'
+ - '^[a-z]{1,5}_.*log$'
+
+'''
+
+RETURN = r'''
+files:
+ description: All matches found with the specified criteria (see stat module for full output of each dictionary)
+ returned: success
+ type: list
+ sample: [
+ { path: "/var/tmp/test1",
+ mode: "0644",
+ "...": "...",
+ checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
+ },
+ { path: "/var/tmp/test2",
+ "...": "..."
+ },
+ ]
+matched:
+ description: Number of matches
+ returned: success
+ type: int
+ sample: 14
+examined:
+ description: Number of filesystem objects looked at
+ returned: success
+ type: int
+ sample: 34
+skipped_paths:
+ description: Skipped paths and the reasons they were skipped
+ returned: success
+ type: dict
+ sample: {"/laskdfj": "'/laskdfj' is not a directory"}
+ version_added: '2.12'
+'''
+
+import fnmatch
+import grp
+import os
+import pwd
+import re
+import stat
+import time
+
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+def pfilter(f, patterns=None, excludes=None, use_regex=False):
+ '''filter using glob or regex patterns'''
+ if not patterns and not excludes:
+ return True
+
+ if use_regex:
+ if patterns and not excludes:
+ for p in patterns:
+ r = re.compile(p)
+ if r.match(f):
+ return True
+
+ elif patterns and excludes:
+ for p in patterns:
+ r = re.compile(p)
+ if r.match(f):
+ for e in excludes:
+ r = re.compile(e)
+ if r.match(f):
+ return False
+ return True
+
+ else:
+ if patterns and not excludes:
+ for p in patterns:
+ if fnmatch.fnmatch(f, p):
+ return True
+
+ elif patterns and excludes:
+ for p in patterns:
+ if fnmatch.fnmatch(f, p):
+ for e in excludes:
+ if fnmatch.fnmatch(f, e):
+ return False
+ return True
+
+ return False
+
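+# Illustrative behaviour (comments only; 'error.log' is a made-up name): with
+# use_regex=False the patterns are shell globs, so
+#   pfilter('error.log', patterns=['*.log'])                      -> True
+#   pfilter('error.log', patterns=['*.log'], excludes=['error*']) -> False
+# Excludes are only consulted for names that already matched a pattern.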
+
+def agefilter(st, now, age, timestamp):
+ '''filter files by age; a positive age selects files at least that old, a negative age at most that old'''
+ if age is None:
+ return True
+ elif age >= 0 and now - getattr(st, "st_%s" % timestamp) >= abs(age):
+ return True
+ elif age < 0 and now - getattr(st, "st_%s" % timestamp) <= abs(age):
+ return True
+ return False
+
+
+def sizefilter(st, size):
+ '''filter files by size; a positive size selects files at least that large, a negative size at most that large'''
+ if size is None:
+ return True
+ elif size >= 0 and st.st_size >= abs(size):
+ return True
+ elif size < 0 and st.st_size <= abs(size):
+ return True
+ return False
+
+
+def contentfilter(fsname, pattern, read_whole_file=False):
+ """
+ Filter files which contain the given expression
+ :arg fsname: Filename to scan for lines matching a pattern
+ :arg pattern: Pattern to look for inside of line
+ :arg read_whole_file: If true, the whole file is read into memory before the regex is applied against it. Otherwise, the regex is applied line-by-line.
+ :rtype: bool
+ :returns: True if one of the lines in fsname matches the pattern. Otherwise False
+ """
+ if pattern is None:
+ return True
+
+ prog = re.compile(pattern)
+
+ try:
+ with open(fsname) as f:
+ if read_whole_file:
+ return bool(prog.search(f.read()))
+
+ for line in f:
+ if prog.match(line):
+ return True
+
+ except Exception:
+ pass
+
+ return False
+
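+# A note on matching (comments only): line-by-line mode uses re.match(), which
+# anchors at the start of each line, so contains='ERROR' hits 'ERROR: disk full'
+# but not 'fatal ERROR'; read_whole_file=true uses re.search(), which matches
+# anywhere in the file.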
+
+def statinfo(st):
+ pw_name = ""
+ gr_name = ""
+
+ try: # user data
+ pw_name = pwd.getpwuid(st.st_uid).pw_name
+ except Exception:
+ pass
+
+ try: # group data
+ gr_name = grp.getgrgid(st.st_gid).gr_name
+ except Exception:
+ pass
+
+ return {
+ 'mode': "%04o" % stat.S_IMODE(st.st_mode),
+ 'isdir': stat.S_ISDIR(st.st_mode),
+ 'ischr': stat.S_ISCHR(st.st_mode),
+ 'isblk': stat.S_ISBLK(st.st_mode),
+ 'isreg': stat.S_ISREG(st.st_mode),
+ 'isfifo': stat.S_ISFIFO(st.st_mode),
+ 'islnk': stat.S_ISLNK(st.st_mode),
+ 'issock': stat.S_ISSOCK(st.st_mode),
+ 'uid': st.st_uid,
+ 'gid': st.st_gid,
+ 'size': st.st_size,
+ 'inode': st.st_ino,
+ 'dev': st.st_dev,
+ 'nlink': st.st_nlink,
+ 'atime': st.st_atime,
+ 'mtime': st.st_mtime,
+ 'ctime': st.st_ctime,
+ 'gr_name': gr_name,
+ 'pw_name': pw_name,
+ 'wusr': bool(st.st_mode & stat.S_IWUSR),
+ 'rusr': bool(st.st_mode & stat.S_IRUSR),
+ 'xusr': bool(st.st_mode & stat.S_IXUSR),
+ 'wgrp': bool(st.st_mode & stat.S_IWGRP),
+ 'rgrp': bool(st.st_mode & stat.S_IRGRP),
+ 'xgrp': bool(st.st_mode & stat.S_IXGRP),
+ 'woth': bool(st.st_mode & stat.S_IWOTH),
+ 'roth': bool(st.st_mode & stat.S_IROTH),
+ 'xoth': bool(st.st_mode & stat.S_IXOTH),
+ 'isuid': bool(st.st_mode & stat.S_ISUID),
+ 'isgid': bool(st.st_mode & stat.S_ISGID),
+ }
+
+
+def handle_walk_errors(e):
+ raise e
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ paths=dict(type='list', required=True, aliases=['name', 'path'], elements='str'),
+ patterns=dict(type='list', default=[], aliases=['pattern'], elements='str'),
+ excludes=dict(type='list', aliases=['exclude'], elements='str'),
+ contains=dict(type='str'),
+ read_whole_file=dict(type='bool', default=False),
+ file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
+ age=dict(type='str'),
+ age_stamp=dict(type='str', default="mtime", choices=['atime', 'ctime', 'mtime']),
+ size=dict(type='str'),
+ recurse=dict(type='bool', default=False),
+ hidden=dict(type='bool', default=False),
+ follow=dict(type='bool', default=False),
+ get_checksum=dict(type='bool', default=False),
+ use_regex=dict(type='bool', default=False),
+ depth=dict(type='int'),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+
+ # Set the default match pattern to either a match-all glob or regex,
+ # depending on whether use_regex is set. This ensures pfilter gets
+ # something it can handle if excludes: is set without a pattern.
+ if not params['patterns']:
+ if params['use_regex']:
+ params['patterns'] = ['.*']
+ else:
+ params['patterns'] = ['*']
+
+ filelist = []
+ skipped = {}
+
+ if params['age'] is None:
+ age = None
+ else:
+ # convert age to seconds:
+ m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
+ seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
+ if m:
+ age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
+ else:
+ module.fail_json(age=params['age'], msg="failed to process age")
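+ # For example: age='-2d' parses to -2 * 86400 = -172800 seconds, which with
+ # the default age_stamp='mtime' selects files modified within the last two days.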
+
+ if params['size'] is None:
+ size = None
+ else:
+ # convert size to bytes:
+ m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
+ bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
+ if m:
+ size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
+ else:
+ module.fail_json(size=params['size'], msg="failed to process size")
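+ # For example: size='10m' parses to 10 * 1024**2 = 10485760 bytes; a negative
+ # size selects files of at most that size (see sizefilter above).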
+
+ now = time.time()
+ msg = 'All paths examined'
+ looked = 0
+ has_warnings = False
+ for npath in params['paths']:
+ npath = os.path.expanduser(os.path.expandvars(npath))
+ try:
+ if not os.path.isdir(npath):
+ raise Exception("'%s' is not a directory" % to_native(npath))
+
+ for root, dirs, files in os.walk(npath, onerror=handle_walk_errors, followlinks=params['follow']):
+ looked = looked + len(files) + len(dirs)
+ for fsobj in (files + dirs):
+ fsname = os.path.normpath(os.path.join(root, fsobj))
+ if params['depth']:
+ wpath = npath.rstrip(os.path.sep) + os.path.sep
+ depth = int(fsname.count(os.path.sep)) - int(wpath.count(os.path.sep)) + 1
+ if depth > params['depth']:
+ # Empty the list used by os.walk to avoid traversing deeper unnecessarily
+ del dirs[:]
+ continue
+ if os.path.basename(fsname).startswith('.') and not params['hidden']:
+ continue
+
+ try:
+ st = os.lstat(fsname)
+ except (IOError, OSError) as e:
+ module.warn("Skipped entry '%s' due to this access issue: %s\n" % (fsname, to_text(e)))
+ skipped[fsname] = to_text(e)
+ has_warnings = True
+ continue
+
+ r = {'path': fsname}
+ if params['file_type'] == 'any':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
+
+ r.update(statinfo(st))
+ if stat.S_ISREG(st.st_mode) and params['get_checksum']:
+ r['checksum'] = module.sha1(fsname)
+
+ if stat.S_ISREG(st.st_mode):
+ if sizefilter(st, size):
+ filelist.append(r)
+ else:
+ filelist.append(r)
+
+ elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
+
+ r.update(statinfo(st))
+ filelist.append(r)
+
+ elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
+ agefilter(st, now, age, params['age_stamp']) and \
+ sizefilter(st, size) and contentfilter(fsname, params['contains'], params['read_whole_file']):
+
+ r.update(statinfo(st))
+ if params['get_checksum']:
+ r['checksum'] = module.sha1(fsname)
+ filelist.append(r)
+
+ elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
+
+ r.update(statinfo(st))
+ filelist.append(r)
+
+ if not params['recurse']:
+ break
+ except Exception as e:
+ skipped[npath] = to_text(e)
+ module.warn("Skipped '%s' path due to this access issue: %s\n" % (to_text(npath), skipped[npath]))
+ has_warnings = True
+
+ if has_warnings:
+ msg = 'Not all paths examined, check warnings for details'
+ matched = len(filelist)
+ module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked, skipped_paths=skipped)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/gather_facts.py b/lib/ansible/modules/gather_facts.py
new file mode 100644
index 0000000..b099cd8
--- /dev/null
+++ b/lib/ansible/modules/gather_facts.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gather_facts
+version_added: 2.8
+short_description: Gathers facts about remote hosts
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.facts
+ - action_common_attributes.flow
+description:
+ - This module takes care of executing the R(configured facts modules,FACTS_MODULES); the default is to use the M(ansible.builtin.setup) module.
+ - This module is automatically called by playbooks to gather useful variables about remote hosts that can be used in playbooks.
+ - It can also be executed directly by C(/usr/bin/ansible) to check what variables are available to a host.
+ - Ansible automatically provides many I(facts) about the system.
+options:
+ parallel:
+ description:
+ - A toggle that controls whether the fact modules are executed in parallel or serially, in order.
+ Serial execution can guarantee the merge order of module facts at the expense of performance.
+ - By default it will be true if more than one fact module is used.
+ type: bool
+attributes:
+ action:
+ support: full
+ async:
+ details: multiple modules can be executed in parallel or serially, but the action itself will not be async
+ support: partial
+ bypass_host_loop:
+ support: none
+ check_mode:
+ details: since this action should just query the target system info it always runs in check mode
+ support: full
+ diff_mode:
+ support: none
+ facts:
+ support: full
+ platform:
+ details: The action plugin should be able to select the specific platform modules automatically, or it can be configured manually
+ platforms: all
+notes:
+ - This is mostly a wrapper around other fact gathering modules.
+ - Options passed into this action must be supported by all the underlying fact modules configured.
+ - Facts returned by each module will be merged; conflicts will favor the 'last merged'.
+ Order is not guaranteed when doing parallel gathering on multiple modules.
+author:
+ - "Ansible Core Team"
+'''
+
+RETURN = """
+# depends on the fact module called
+"""
+
+EXAMPLES = """
+# Display facts from all hosts and store them indexed by hostname at /tmp/facts.
+# ansible all -m ansible.builtin.gather_facts --tree /tmp/facts
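+
+# A hypothetical ad-hoc run forcing serial gathering via the documented 'parallel'
+# option (module arguments passed as key=value):
+# ansible all -m ansible.builtin.gather_facts -a 'parallel=false'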
+"""
diff --git a/lib/ansible/modules/get_url.py b/lib/ansible/modules/get_url.py
new file mode 100644
index 0000000..eec2424
--- /dev/null
+++ b/lib/ansible/modules/get_url.py
@@ -0,0 +1,706 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: get_url
+short_description: Downloads files from HTTP, HTTPS, or FTP to a node
+description:
+ - Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote
+ server I(must) have direct access to the remote resource.
+ - By default, if an environment variable C(<protocol>_proxy) is set on
+ the target host, requests will be sent through that proxy. This
+ behaviour can be overridden by setting a variable for this task
+ (see R(setting the environment,playbooks_environment)),
+ or by using the use_proxy option.
+ - HTTP redirects can redirect from HTTP to HTTPS, so you should be sure that
+ your proxy environment for both protocols is correct.
+ - From Ansible 2.4, when run with C(--check), it will do a HEAD request to validate the URL but
+ will not download the entire file or verify it against hashes and will report incorrect changed status.
+ - For Windows targets, use the M(ansible.windows.win_get_url) module instead.
+version_added: '0.6'
+options:
+ ciphers:
+ description:
+ - SSL/TLS Ciphers to use for the request
+ - 'When a list is provided, all ciphers are joined in order with C(:)'
+ - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
+ for more details.
+ - The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions
+ type: list
+ elements: str
+ version_added: '2.14'
+ decompress:
+ description:
+ - Whether to attempt to decompress gzip content-encoded responses
+ type: bool
+ default: true
+ version_added: '2.14'
+ url:
+ description:
+ - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ type: str
+ required: true
+ dest:
+ description:
+ - Absolute path of where to download the file to.
+ - If C(dest) is a directory, either the server provided filename or, if
+ none provided, the base name of the URL on the remote server will be
+ used. If a directory, C(force) has no effect.
+ - If C(dest) is a directory, the file will always be downloaded
+ (regardless of the C(force) and C(checksum) option), but
+ replaced only if the contents changed.
+ type: path
+ required: true
+ tmp_dest:
+ description:
+ - Absolute path of where temporary file is downloaded to.
+ - When run on Ansible 2.5 or greater, the path defaults to Ansible's remote_tmp setting
+ - When run on Ansible prior to 2.5, it defaults to C(TMPDIR), C(TEMP) or C(TMP) env variables or a platform specific value.
+ - U(https://docs.python.org/3/library/tempfile.html#tempfile.tempdir)
+ type: path
+ version_added: '2.1'
+ force:
+ description:
+ - If C(true) and C(dest) is not a directory, will download the file every
+ time and replace the file if the contents change. If C(false), the file
+ will only be downloaded if the destination does not exist. Generally
+ should be C(true) only for small local files.
+ - Prior to 0.6, this module behaved as if C(true) was the default.
+ type: bool
+ default: no
+ version_added: '0.7'
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ version_added: '2.1'
+ checksum:
+ description:
+ - 'If a checksum is passed to this parameter, the digest of the
+ destination file will be calculated after it is downloaded to ensure
+ its integrity and verify that the transfer completed successfully.
+ Format: <algorithm>:<checksum|url>, e.g. checksum="sha256:D98291AC[...]B6DC7B97",
+ checksum="sha256:http://example.com/path/sha256sum.txt"'
+ - If you worry about portability, only the sha1 algorithm is available
+ on all platforms and python versions.
+ - The third party hashlib library can be installed for access to additional algorithms.
+ - Additionally, if a checksum is passed to this parameter, and the file exist under
+ the C(dest) location, the I(destination_checksum) would be calculated, and if
+ checksum equals I(destination_checksum), the file download would be skipped
+ (unless C(force) is true). If the checksum does not equal I(destination_checksum),
+ the destination file is deleted.
+ type: str
+ default: ''
+ version_added: "2.0"
+ use_proxy:
+ description:
+ - If C(false), it will not use a proxy, even if one is defined in
+ an environment variable on the target hosts.
+ type: bool
+ default: yes
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ timeout:
+ description:
+ - Timeout in seconds for URL request.
+ type: int
+ default: 10
+ version_added: '1.8'
+ headers:
+ description:
+ - Add custom HTTP headers to a request in hash/dict format.
+ - The hash/dict format was added in Ansible 2.6.
+ - Previous versions used a C("key:value,key:value") string format.
+ - The C("key:value,key:value") string format is deprecated and has been removed in version 2.10.
+ type: dict
+ version_added: '2.0'
+ url_username:
+ description:
+ - The username for use in HTTP basic authentication.
+ - This parameter can be used without C(url_password) for sites that allow empty passwords.
+ - Since version 2.8 you can also use the C(username) alias for this option.
+ type: str
+ aliases: ['username']
+ version_added: '1.6'
+ url_password:
+ description:
+ - The password for use in HTTP basic authentication.
+ - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
+ - Since version 2.8 you can also use the 'password' alias for this option.
+ type: str
+ aliases: ['password']
+ version_added: '1.6'
+ force_basic_auth:
+ description:
+ - Force the sending of the Basic authentication header upon initial request.
+ - httplib2, the library used by the uri module, only sends authentication information when a web service
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail.
+ type: bool
+ default: no
+ version_added: '2.0'
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key as well, and if the key is included, C(client_key) is not required.
+ type: path
+ version_added: '2.4'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If C(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '2.4'
+ http_agent:
+ description:
+ - Header to identify as, generally appears in web server logs.
+ type: str
+ default: ansible-httpget
+ unredirected_headers:
+ description:
+ - A list of header names that will not be sent on subsequent redirected requests. This list is case
+ insensitive. By default all headers will be redirected. In some cases it may be beneficial to list
+ headers such as C(Authorization) here to avoid potential credential exposure.
+ default: []
+ type: list
+ elements: str
+ version_added: '2.12'
+ use_gssapi:
+ description:
+ - Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
+ authentication.
+ - Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
+ - Credentials for GSSAPI can be specified with I(url_username)/I(url_password) or with the GSSAPI env var
+ C(KRB5CCNAME) that specifies a custom Kerberos credential cache.
+ - NTLM authentication is I(not) supported even if the GSSAPI mech for NTLM has been installed.
+ type: bool
+ default: no
+ version_added: '2.11'
+ use_netrc:
+ description:
+ - Determines whether to use credentials from the ``~/.netrc`` file.
+ - By default, .netrc is used with Basic authentication headers.
+ - When set to C(false), .netrc credentials are ignored.
+ type: bool
+ default: true
+ version_added: '2.14'
+# informational: requirements for nodes
+extends_documentation_fragment:
+ - files
+ - action_common_attributes
+attributes:
+ check_mode:
+ details: the changed status will reflect comparison to an empty source file
+ support: partial
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - For Windows targets, use the M(ansible.windows.win_get_url) module instead.
+seealso:
+- module: ansible.builtin.uri
+- module: ansible.windows.win_get_url
+author:
+- Jan-Piet Mens (@jpmens)
+'''
+
+EXAMPLES = r'''
+- name: Download foo.conf
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ mode: '0440'
+
+- name: Download file and force basic auth
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ force_basic_auth: yes
+
+- name: Download file with custom HTTP headers
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ headers:
+ key1: one
+ key2: two
+
+- name: Download file with check (sha256)
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ checksum: sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
+
+- name: Download file with check (md5)
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ checksum: md5:66dffb5228a211e61d6d7ef4a86f5758
+
+- name: Download file with checksum url (sha256)
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ checksum: sha256:http://example.com/path/sha256sum.txt
+
+- name: Download file from a file path
+ ansible.builtin.get_url:
+ url: file:///tmp/afile.txt
+ dest: /tmp/afilecopy.txt
+
+- name: < Fetch file that requires authentication.
+ username/password only available since 2.8, in older versions you need to use url_username/url_password
+ ansible.builtin.get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ username: bar
+ password: '{{ mysecret }}'
+'''
+
+RETURN = r'''
+backup_file:
+ description: name of backup file created after download
+ returned: changed and if backup=yes
+ type: str
+ sample: /path/to/file.txt.2015-02-12@22:09~
+checksum_dest:
+ description: sha1 checksum of the file after copy
+ returned: success
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+checksum_src:
+ description: sha1 checksum of the file
+ returned: success
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+dest:
+ description: destination file/path
+ returned: success
+ type: str
+ sample: /path/to/file.txt
+elapsed:
+ description: The number of seconds that elapsed while performing the download
+ returned: always
+ type: int
+ sample: 23
+gid:
+ description: group id of the file
+ returned: success
+ type: int
+ sample: 100
+group:
+ description: group of the file
+ returned: success
+ type: str
+ sample: "httpd"
+md5sum:
+ description: md5 checksum of the file after download
+ returned: when supported
+ type: str
+ sample: "2a5aeecc61dc98c4d780b14b330e3282"
+mode:
+ description: permissions of the target
+ returned: success
+ type: str
+ sample: "0644"
+msg:
+ description: the HTTP message from the request
+ returned: always
+ type: str
+ sample: OK (unknown bytes)
+owner:
+ description: owner of the file
+ returned: success
+ type: str
+ sample: httpd
+secontext:
+ description: the SELinux security context of the file
+ returned: success
+ type: str
+ sample: unconfined_u:object_r:user_tmp_t:s0
+size:
+ description: size of the target
+ returned: success
+ type: int
+ sample: 1220
+src:
+ description: source file used after download
+ returned: always
+ type: str
+ sample: /tmp/tmpAdFLdV
+state:
+ description: state of the target
+ returned: success
+ type: str
+ sample: file
+status_code:
+ description: the HTTP status code from the request
+ returned: always
+ type: int
+ sample: 200
+uid:
+ description: owner id of the file, after execution
+ returned: success
+ type: int
+ sample: 100
+url:
+ description: the actual URL used for the request
+ returned: always
+ type: str
+ sample: https://www.ansible.com/
+'''
+
+import datetime
+import os
+import re
+import shutil
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlsplit
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+# ==============================================================
+# url handling
+
+
+def url_filename(url):
+ fn = os.path.basename(urlsplit(url)[2])
+ if fn == '':
+ return 'index.html'
+ return fn
+
+
+def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None, tmp_dest='', method='GET', unredirected_headers=None,
+ decompress=True, ciphers=None, use_netrc=True):
+ """
+ Download data from the url and store in a temporary file.
+
+ Return (tempfile, info about the request)
+ """
+
+ start = datetime.datetime.utcnow()
+ rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers, method=method,
+ unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc)
+ elapsed = (datetime.datetime.utcnow() - start).seconds
+
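+ # A 304 Not Modified reply (possible when last_mod_time was sent) means the
+ # local copy is already current, so exit reporting no change.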
+ if info['status'] == 304:
+ module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''), status_code=info['status'], elapsed=elapsed)
+
+ # Exceptions in fetch_url may result in a status of -1; this ensures a proper error reaches the user in all cases
+ if info['status'] == -1:
+ module.fail_json(msg=info['msg'], url=url, dest=dest, elapsed=elapsed)
+
+ if info['status'] != 200 and not url.startswith('file:/') and not (url.startswith('ftp:/') and info.get('msg', '').startswith('OK')):
+ module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest, elapsed=elapsed)
+
+ # create a temporary file and copy content to do checksum-based replacement
+ if tmp_dest:
+ # tmp_dest should be an existing dir
+ tmp_dest_is_dir = os.path.isdir(tmp_dest)
+ if not tmp_dest_is_dir:
+ if os.path.exists(tmp_dest):
+ module.fail_json(msg="%s is a file but should be a directory." % tmp_dest, elapsed=elapsed)
+ else:
+ module.fail_json(msg="%s directory does not exist." % tmp_dest, elapsed=elapsed)
+ else:
+ tmp_dest = module.tmpdir
+
+ fd, tempname = tempfile.mkstemp(dir=tmp_dest)
+
+ f = os.fdopen(fd, 'wb')
+ try:
+ shutil.copyfileobj(rsp, f)
+ except Exception as e:
+ os.remove(tempname)
+ module.fail_json(msg="failed to create temporary content file: %s" % to_native(e), elapsed=elapsed, exception=traceback.format_exc())
+ f.close()
+ rsp.close()
+ return tempname, info
+
+
+def extract_filename_from_headers(headers):
+ """
+ Extracts a filename from the given dict of HTTP headers.
+
+ Looks for the content-disposition header and applies a regex.
+ Returns the filename if successful, else None."""
+ cont_disp_regex = 'attachment; ?filename="?([^"]+)'
+ res = None
+
+ if 'content-disposition' in headers:
+ cont_disp = headers['content-disposition']
+ match = re.match(cont_disp_regex, cont_disp)
+ if match:
+ res = match.group(1)
+ # Try preventing any funny business.
+ res = os.path.basename(res)
+
+ return res
+
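+# For instance (hypothetical header value): 'attachment; filename="foo.conf"'
+# yields 'foo.conf'; os.path.basename() then strips any sneaky path components.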
+
+def is_url(checksum):
+ """
+ Returns True if checksum value has supported URL scheme, else False."""
+ supported_schemes = ('http', 'https', 'ftp', 'file')
+
+ return urlsplit(checksum).scheme in supported_schemes
+
+
+# ==============================================================
+# main
+
+def main():
+ argument_spec = url_argument_spec()
+
+ # setup aliases
+ argument_spec['url_username']['aliases'] = ['username']
+ argument_spec['url_password']['aliases'] = ['password']
+
+ argument_spec.update(
+ url=dict(type='str', required=True),
+ dest=dict(type='path', required=True),
+ backup=dict(type='bool', default=False),
+ checksum=dict(type='str', default=''),
+ timeout=dict(type='int', default=10),
+ headers=dict(type='dict'),
+ tmp_dest=dict(type='path'),
+ unredirected_headers=dict(type='list', elements='str', default=[]),
+ decompress=dict(type='bool', default=True),
+ ciphers=dict(type='list', elements='str'),
+ use_netrc=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ url = module.params['url']
+ dest = module.params['dest']
+ backup = module.params['backup']
+ force = module.params['force']
+ checksum = module.params['checksum']
+ use_proxy = module.params['use_proxy']
+ timeout = module.params['timeout']
+ headers = module.params['headers']
+ tmp_dest = module.params['tmp_dest']
+ unredirected_headers = module.params['unredirected_headers']
+ decompress = module.params['decompress']
+ ciphers = module.params['ciphers']
+ use_netrc = module.params['use_netrc']
+
+ result = dict(
+ changed=False,
+ checksum_dest=None,
+ checksum_src=None,
+ dest=dest,
+ elapsed=0,
+ url=url,
+ )
+
+ dest_is_dir = os.path.isdir(dest)
+ last_mod_time = None
+
+ # checksum specified, parse for algorithm and checksum
+ if checksum:
+ try:
+ algorithm, checksum = checksum.split(':', 1)
+ except ValueError:
+ module.fail_json(msg="The checksum parameter has to be in format <algorithm>:<checksum>", **result)
+
+ if is_url(checksum):
+ checksum_url = checksum
+ # download checksum file to checksum_tmpsrc
+ checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest,
+ unredirected_headers=unredirected_headers, ciphers=ciphers, use_netrc=use_netrc)
+ with open(checksum_tmpsrc) as f:
+ lines = [line.rstrip('\n') for line in f]
+ os.remove(checksum_tmpsrc)
+ checksum_map = []
+ filename = url_filename(url)
+ if len(lines) == 1 and len(lines[0].split()) == 1:
+ # Only a single line with a single string
+ # treat it as a checksum only file
+ checksum_map.append((lines[0], filename))
+ else:
+ # The assumption here is the file is in the format of
+ # checksum filename
+ for line in lines:
+ # Split by one whitespace to keep the leading type char ' ' (whitespace) for text and '*' for binary
+ parts = line.split(" ", 1)
+ if len(parts) == 2:
+ # Remove the leading type char; we expect ' ' (text) or '*' (binary)
+ if parts[1].startswith((" ", "*",)):
+ parts[1] = parts[1][1:]
+
+ # Append checksum and path without potential leading './'
+ checksum_map.append((parts[0], parts[1].lstrip("./")))
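+ # Hypothetical illustration: a line 'b5bb9d80...  file.conf' ends up in
+ # checksum_map as ('b5bb9d80...', 'file.conf').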
+
+ # Look through each line in the checksum file for a hash corresponding to
+ # the filename in the url, returning the first hash that is found.
+ for cksum in (s for (s, f) in checksum_map if f == filename):
+ checksum = cksum
+ break
+ else:
+ checksum = None
+
+ if checksum is None:
+ module.fail_json(msg="Unable to find a checksum for file '%s' in '%s'" % (filename, checksum_url))
+ # Remove any non-alphanumeric characters, including the infamous
+ # Unicode zero-width space
+ checksum = re.sub(r'\W+', '', checksum).lower()
+ # Ensure the checksum portion is a hexdigest
+ try:
+ int(checksum, 16)
+ except ValueError:
+ module.fail_json(msg='The checksum format is invalid', **result)
+
+ if not dest_is_dir and os.path.exists(dest):
+ checksum_mismatch = False
+
+ # If the download is not forced and there is a checksum, allow
+ # checksum match to skip the download.
+ if not force and checksum != '':
+ destination_checksum = module.digest_from_file(dest, algorithm)
+
+ if checksum != destination_checksum:
+ checksum_mismatch = True
+
+ # Not forcing redownload, unless checksum does not match
+ if not force and checksum and not checksum_mismatch:
+ # allow file attribute changes
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ result['changed'] = module.set_fs_attributes_if_different(file_args, False)
+ if result['changed']:
+ module.exit_json(msg="file already exists but file attributes changed", **result)
+ module.exit_json(msg="file already exists", **result)
+
+ # If the file already exists, prepare the last modified time for the
+ # request.
+ mtime = os.path.getmtime(dest)
+ last_mod_time = datetime.datetime.utcfromtimestamp(mtime)
+
+ # If the checksum does not match we have to force the download
+ # because last_mod_time may be newer than on remote
+ if checksum_mismatch:
+ force = True
+
+ # download to tmpsrc
+ start = datetime.datetime.utcnow()
+ method = 'HEAD' if module.check_mode else 'GET'
+ tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest, method,
+ unredirected_headers=unredirected_headers, decompress=decompress, ciphers=ciphers, use_netrc=use_netrc)
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ result['src'] = tmpsrc
+
+ # Now that the request has completed, we can finally generate the final
+ # destination file name from the info dict.
+
+ if dest_is_dir:
+ filename = extract_filename_from_headers(info)
+ if not filename:
+ # Fall back to extracting the filename from the URL.
+ # Pluck the URL from the info, since a redirect could have changed
+ # it.
+ filename = url_filename(info['url'])
+ dest = os.path.join(dest, filename)
+ result['dest'] = dest
+
+ # raise an error if there is no tmpsrc file
+ if not os.path.exists(tmpsrc):
+ module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], **result)
+ if not os.access(tmpsrc, os.R_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Source %s is not readable" % (tmpsrc), **result)
+ result['checksum_src'] = module.sha1(tmpsrc)
+
+ # check if there is no dest file
+ if os.path.exists(dest):
+ # raise an error if copy has no permission on dest
+ if not os.access(dest, os.W_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s is not writable" % (dest), **result)
+ if not os.access(dest, os.R_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s is not readable" % (dest), **result)
+ result['checksum_dest'] = module.sha1(dest)
+ else:
+ if not os.path.exists(os.path.dirname(dest)):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s does not exist" % (os.path.dirname(dest)), **result)
+ if not os.access(os.path.dirname(dest), os.W_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s is not writable" % (os.path.dirname(dest)), **result)
+
+ if module.check_mode:
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ result['changed'] = ('checksum_dest' not in result or
+ result['checksum_src'] != result['checksum_dest'])
+ module.exit_json(msg=info.get('msg', ''), **result)
+
+ backup_file = None
+ if result['checksum_src'] != result['checksum_dest']:
+ try:
+ if backup:
+ if os.path.exists(dest):
+ backup_file = module.backup_local(dest)
+ module.atomic_move(tmpsrc, dest, unsafe_writes=module.params['unsafe_writes'])
+ except Exception as e:
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)),
+ exception=traceback.format_exc(), **result)
+ result['changed'] = True
+ else:
+ result['changed'] = False
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+
+ if checksum != '':
+ destination_checksum = module.digest_from_file(dest, algorithm)
+
+ if checksum != destination_checksum:
+ os.remove(dest)
+ module.fail_json(msg="The checksum for %s did not match %s; it was %s." % (dest, checksum, destination_checksum), **result)
+
+ # allow file attribute changes
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'])
+
+ # Backwards compat only. We'll return None on FIPS enabled systems
+ try:
+ result['md5sum'] = module.md5(dest)
+ except ValueError:
+ result['md5sum'] = None
+
+ if backup_file:
+ result['backup_file'] = backup_file
+
+ # Mission complete
+ module.exit_json(msg=info.get('msg', ''), status_code=info.get('status', ''), **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/getent.py b/lib/ansible/modules/getent.py
new file mode 100644
index 0000000..1f76380
--- /dev/null
+++ b/lib/ansible/modules/getent.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Brian Coca <brian.coca+dev@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: getent
+short_description: A wrapper around the Unix getent utility
+description:
+ - Runs getent against one of its various databases and returns information into
+ the host's facts, in a getent_<database> prefixed variable.
+version_added: "1.8"
+options:
+ database:
+ description:
+ - The name of a getent database supported by the target system (passwd, group,
+ hosts, etc.).
+ type: str
+ required: True
+ key:
+ description:
+ - Key to look up in the specified database, otherwise the
+ full contents are returned.
+ type: str
+ default: ''
+ service:
+ description:
+ - Override all databases with the specified service.
+ - The underlying system must support the service flag, which is not always available.
+ type: str
+ version_added: "2.9"
+ split:
+ description:
+ - Character used to split the database values into lists/arrays such as C(:) or C(\t),
+ otherwise it will try to pick one depending on the database.
+ type: str
+ fail_key:
+ description:
+ - If C(true), the task will fail when the supplied key is missing.
+ type: bool
+ default: 'yes'
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.facts
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ facts:
+ support: full
+ platform:
+ platforms: posix
+notes:
+ - Not all databases support enumeration, check system documentation for details.
+author:
+- Brian Coca (@bcoca)
+'''
+
+EXAMPLES = '''
+- name: Get root user info
+ ansible.builtin.getent:
+ database: passwd
+ key: root
+- ansible.builtin.debug:
+ var: ansible_facts.getent_passwd
+
+- name: Get all groups
+ ansible.builtin.getent:
+ database: group
+ split: ':'
+- ansible.builtin.debug:
+ var: ansible_facts.getent_group
+
+- name: Get all hosts, split on whitespace by default
+ ansible.builtin.getent:
+ database: hosts
+- ansible.builtin.debug:
+ var: ansible_facts.getent_hosts
+
+- name: Get http service info, no error if missing
+ ansible.builtin.getent:
+ database: services
+ key: http
+ fail_key: False
+- ansible.builtin.debug:
+ var: ansible_facts.getent_services
+
+- name: Get user password hash (requires sudo/root)
+ ansible.builtin.getent:
+ database: shadow
+ key: www-data
+ split: ':'
+- ansible.builtin.debug:
+ var: ansible_facts.getent_shadow
+
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Facts to add to ansible_facts.
+ returned: always
+ type: dict
+ contains:
+ getent_<database>:
+ description:
+ - A list of results, or a single result as a list of the fields the database provides.
+ - The list elements depend on the database queried; see the getent man page for the structure.
+ - Starting with 2.11 it returns multiple duplicate entries; previously it only returned the last one.
+ returned: always
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ database=dict(type='str', required=True),
+ key=dict(type='str', no_log=False),
+ service=dict(type='str'),
+ split=dict(type='str'),
+ fail_key=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ colon = ['passwd', 'shadow', 'group', 'gshadow']
+
+ database = module.params['database']
+ key = module.params.get('key')
+ split = module.params.get('split')
+ service = module.params.get('service')
+ fail_key = module.params.get('fail_key')
+
+ getent_bin = module.get_bin_path('getent', True)
+
+ if key is not None:
+ cmd = [getent_bin, database, key]
+ else:
+ cmd = [getent_bin, database]
+
+ if service is not None:
+ cmd.extend(['-s', service])
+
+ if split is None and database in colon:
+ split = ':'
+
+ try:
+ rc, out, err = module.run_command(cmd)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ msg = "Unexpected failure!"
+ dbtree = 'getent_%s' % database
+ results = {dbtree: {}}
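+ # Illustrative result shape (hypothetical 'passwd' lookup of key 'root'):
+ #   {'getent_passwd': {'root': ['x', '0', '0', 'root', '/root', '/bin/bash']}}
+ # Duplicate keys are collected into a list of such field lists.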
+
+ if rc == 0:
+ seen = {}
+ for line in out.splitlines():
+ record = line.split(split)
+
+ if record[0] in seen:
+ # more than one result for same key, ensure we store in a list
+ if seen[record[0]] == 1:
+ results[dbtree][record[0]] = [results[dbtree][record[0]]]
+
+ results[dbtree][record[0]].append(record[1:])
+ seen[record[0]] += 1
+ else:
+ # new key/value, just assign
+ results[dbtree][record[0]] = record[1:]
+ seen[record[0]] = 1
+
+ module.exit_json(ansible_facts=results)
+
+ elif rc == 1:
+ msg = "Missing arguments, or database unknown."
+ elif rc == 2:
+ msg = "One or more supplied key could not be found in the database."
+ if not fail_key:
+ results[dbtree][key] = None
+ module.exit_json(ansible_facts=results, msg=msg)
+ elif rc == 3:
+ msg = "Enumeration not supported on this database."
+
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/git.py b/lib/ansible/modules/git.py
new file mode 100644
index 0000000..37477b3
--- /dev/null
+++ b/lib/ansible/modules/git.py
@@ -0,0 +1,1418 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: git
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+version_added: "0.0.1"
+short_description: Deploy software (or files) from git checkouts
+description:
+ - Manage I(git) checkouts of repositories to deploy files or software.
+extends_documentation_fragment: action_common_attributes
+options:
+ repo:
+ description:
+ - git, SSH, or HTTP(S) protocol address of the git repository.
+ type: str
+ required: true
+ aliases: [ name ]
+ dest:
+ description:
+ - The path of where the repository should be checked out. This
+ is equivalent to C(git clone [repo_url] [directory]). The repository
+ named in I(repo) is not appended to this path and the destination directory must be empty. This
+ parameter is required, unless I(clone) is set to C(false).
+ type: path
+ required: true
+ version:
+ description:
+ - What version of the repository to check out. This can be
+ the literal string C(HEAD), a branch name, a tag name.
+ It can also be a I(SHA-1) hash, in which case I(refspec) needs
+ to be specified if the given revision is not already available.
+ type: str
+ default: "HEAD"
+ accept_hostkey:
+ description:
+ - If C(true), ensure that "-o StrictHostKeyChecking=no" is present as an ssh option.
+ - Be aware that this disables a protection against MITM attacks.
+ - Those using OpenSSH >= 7.5 might want to set I(ssh_opt) to 'StrictHostKeyChecking=accept-new'
+ instead, it does not remove the MITM issue but it does restrict it to the first attempt.
+ type: bool
+ default: 'no'
+ version_added: "1.5"
+ accept_newhostkey:
+ description:
+ - As of OpenSSH 7.5, "-o StrictHostKeyChecking=accept-new" can be
+ used, which is safer and will only accept host keys that are
+ not present or are the same. If C(true), ensure that
+ "-o StrictHostKeyChecking=accept-new" is present as an ssh option.
+ type: bool
+ default: 'no'
+ version_added: "2.12"
+ ssh_opts:
+ description:
+ - Options git will pass to ssh when used as the protocol; it works via C(git)'s
+ GIT_SSH/GIT_SSH_COMMAND environment variables.
+ - For older versions it appends GIT_SSH_OPTS (specific to this module) to the
+ variables above or via a wrapper script.
+ - Other options can add to this list, like I(key_file) and I(accept_hostkey).
+ - An example value could be "-o StrictHostKeyChecking=no" (although this particular
+ option is better set by I(accept_hostkey)).
+ - The module ensures that 'BatchMode=yes' is always present to avoid prompts.
+ type: str
+ version_added: "1.5"
+
+ key_file:
+ description:
+ - Specify an optional private key file path, on the target host, to use for the checkout.
+ - This ensures 'IdentitiesOnly=yes' is present in ssh_opts.
+ type: path
+ version_added: "1.5"
+ reference:
+ description:
+ - Reference repository (see "git clone --reference ...").
+ version_added: "1.4"
+ remote:
+ description:
+ - Name of the remote.
+ type: str
+ default: "origin"
+ refspec:
+ description:
+ - Add an additional refspec to be fetched.
+ If version is set to a I(SHA-1) not reachable from any branch
+ or tag, this option may be necessary to specify the ref containing
+ the I(SHA-1).
+ Uses the same syntax as the C(git fetch) command.
+ An example value could be "refs/meta/config".
+ type: str
+ version_added: "1.9"
+ force:
+ description:
+ - If C(true), any modified files in the working
+ repository will be discarded. Prior to 0.7, this was always
+ C(true) and could not be disabled. Prior to 1.9, the default was
+ C(true).
+ type: bool
+ default: 'no'
+ version_added: "0.7"
+ depth:
+ description:
+ - Create a shallow clone with a history truncated to the specified
+ number of revisions. The minimum possible value is C(1), otherwise
+ ignored. Needs I(git>=1.9.1) to work correctly.
+ type: int
+ version_added: "1.2"
+ clone:
+ description:
+ - If C(false), do not clone the repository even if it does not exist locally.
+ type: bool
+ default: 'yes'
+ version_added: "1.9"
+ update:
+ description:
+ - If C(false), do not retrieve new revisions from the origin repository.
+ - Operations like archive will work on the existing (old) repository and might
+ not respond to changes to the options version or remote.
+ type: bool
+ default: 'yes'
+ version_added: "1.2"
+ executable:
+ description:
+ - Path to git executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+ type: path
+ version_added: "1.4"
+ bare:
+ description:
+ - If C(true), repository will be created as a bare repo, otherwise
+ it will be a standard repo with a workspace.
+ type: bool
+ default: 'no'
+ version_added: "1.4"
+ umask:
+ description:
+ - The umask to set before doing any checkouts, or any other
+ repository maintenance.
+ type: raw
+ version_added: "2.2"
+
+ recursive:
+ description:
+ - If C(false), repository will be cloned without the --recursive
+ option, skipping sub-modules.
+ type: bool
+ default: 'yes'
+ version_added: "1.6"
+
+ single_branch:
+ description:
+ - Clone only the history leading to the tip of the specified revision.
+ type: bool
+ default: 'no'
+ version_added: '2.11'
+
+ track_submodules:
+ description:
+ - If C(true), submodules will track the latest commit on their
+ master branch (or other branch specified in .gitmodules). If
+ C(false), submodules will be kept at the revision specified by the
+ main project. This is equivalent to specifying the --remote flag
+ to git submodule update.
+ type: bool
+ default: 'no'
+ version_added: "1.8"
+
+ verify_commit:
+ description:
+ - If C(true), when cloning or checking out a I(version) verify the
+ signature of a GPG-signed commit. This requires git version 2.1.0 or later
+ to be installed. The commit MUST be signed and the public key MUST
+ be present in the GPG keyring.
+ type: bool
+ default: 'no'
+ version_added: "2.0"
+
+ archive:
+ description:
+ - Specify archive file path with extension. If specified, creates an
+ archive file of the specified format containing the tree structure
+ for the source tree.
+ Allowed archive formats ["zip", "tar.gz", "tar", "tgz"].
+ - This will clone and perform git archive from local directory as not
+ all git servers support git archive.
+ type: path
+ version_added: "2.4"
+
+ archive_prefix:
+ description:
+ - Specify a prefix to add to each file path in archive. Requires I(archive) to be specified.
+ version_added: "2.10"
+ type: str
+
+ separate_git_dir:
+ description:
+ - The path to place the cloned repository. If specified, Git repository
+ can be separated from working tree.
+ type: path
+ version_added: "2.7"
+
+ gpg_whitelist:
+ description:
+ - A list of trusted GPG fingerprints to compare to the fingerprint of the
+ GPG-signed commit.
+ - Only used when I(verify_commit=yes).
+ - Use of this feature requires Git 2.6+ due to its reliance on git's C(--raw) flag to C(verify-commit) and C(verify-tag).
+ type: list
+ elements: str
+ default: []
+ version_added: "2.9"
+
+requirements:
+ - git>=1.7.1 (the command line tool)
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+notes:
+ - "If the task seems to be hanging, first verify remote host is in C(known_hosts).
+ SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
+ one solution is to use the option accept_hostkey. Another solution is to
+ add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
+ the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
+'''
+
+EXAMPLES = '''
+- name: Git checkout
+ ansible.builtin.git:
+ repo: 'https://foosball.example.org/path/to/repo.git'
+ dest: /srv/checkout
+ version: release-0.22
+
+- name: Read-write git checkout from github
+ ansible.builtin.git:
+ repo: git@github.com:mylogin/hello.git
+ dest: /home/mylogin/hello
+
+- name: Just ensuring the repo checkout exists
+ ansible.builtin.git:
+ repo: 'https://foosball.example.org/path/to/repo.git'
+ dest: /srv/checkout
+ update: no
+
+- name: Just get information about the repository, whether or not it has already been cloned locally
+ ansible.builtin.git:
+ repo: 'https://foosball.example.org/path/to/repo.git'
+ dest: /srv/checkout
+ clone: no
+ update: no
+
+- name: Checkout a github repo and use refspec to fetch all pull requests
+ ansible.builtin.git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ refspec: '+refs/pull/*:refs/heads/*'
+
+- name: Create git archive from repo
+ ansible.builtin.git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ archive: /tmp/ansible-examples.zip
+
+- name: Clone a repo with separate git directory
+ ansible.builtin.git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ separate_git_dir: /src/ansible-examples.git
+
+- name: Example clone of a single branch
+ ansible.builtin.git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ single_branch: yes
+ version: master
+
+- name: Avoid hanging when http(s) password is missing
+ ansible.builtin.git:
+ repo: https://github.com/ansible/could-be-a-private-repo
+ dest: /src/from-private-repo
+ environment:
+ GIT_TERMINAL_PROMPT: 0 # reports "terminal prompts disabled" on missing password
+ # or GIT_ASKPASS: /bin/true # for git before version 2.3.0, reports "Authentication failed" on missing password
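+
+# An illustrative sketch (the repo URL and key path are hypothetical): combining
+# key_file with accept_hostkey avoids hanging on the first SSH contact with an
+# unknown host, per the notes section above.
+- name: Checkout over SSH with a deploy key, accepting the host key on first use
+ ansible.builtin.git:
+ repo: git@git.example.com:project/repo.git
+ dest: /srv/project
+ key_file: /home/deploy/.ssh/id_ed25519
+ accept_hostkey: yes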
+'''
+
+RETURN = '''
+after:
+ description: Last commit revision of the repository retrieved during the update.
+ returned: success
+ type: str
+ sample: 4c020102a9cd6fe908c9a4a326a38f972f63a903
+before:
+ description: Commit revision before the repository was updated, "null" for new repository.
+ returned: success
+ type: str
+ sample: 67c04ebe40a003bda0efb34eacfb93b0cafdf628
+remote_url_changed:
+ description: Indicates whether the remote URL was changed.
+ returned: success
+ type: bool
+ sample: True
+warnings:
+ description: List of warnings if requested features were not available due to a too-old git version.
+ returned: error
+ type: str
+ sample: git version is too old to fully support the depth argument. Falling back to full checkouts.
+git_dir_now:
+ description: Contains the new path of the .git directory, if it was changed.
+ returned: success
+ type: str
+ sample: /path/to/new/git/dir
+git_dir_before:
+ description: Contains the original path of the .git directory, if it was changed.
+ returned: success
+ type: str
+ sample: /path/to/old/git/dir
+'''
+
+import filecmp
+import os
+import re
+import shlex
+import stat
+import sys
+import shutil
+import tempfile
+from ansible.module_utils.compat.version import LooseVersion
+
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.six import b, string_types
+
+
+def relocate_repo(module, result, repo_dir, old_repo_dir, worktree_dir):
+ if os.path.exists(repo_dir):
+ module.fail_json(msg='Separate-git-dir path %s already exists.' % repo_dir)
+ if worktree_dir:
+ dot_git_file_path = os.path.join(worktree_dir, '.git')
+ try:
+ shutil.move(old_repo_dir, repo_dir)
+ with open(dot_git_file_path, 'w') as dot_git_file:
+ dot_git_file.write('gitdir: %s' % repo_dir)
+ result['git_dir_before'] = old_repo_dir
+ result['git_dir_now'] = repo_dir
+ except (IOError, OSError) as err:
+ # if we already moved the .git dir, roll it back
+ if os.path.exists(repo_dir):
+ shutil.move(repo_dir, old_repo_dir)
+ module.fail_json(msg=u'Unable to move git dir. %s' % to_text(err))
+
+
+def head_splitter(headfile, remote, module=None, fail_on_error=False):
+ '''Extract the head reference'''
+ # https://github.com/ansible/ansible-modules-core/pull/907
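+ #
+ # Illustrative trace: .git/refs/remotes/origin/HEAD typically contains
+ # 'ref: refs/remotes/origin/master'; stripping the remote prefix and
+ # splitting leaves 'master' as the extracted branch name.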
+
+ res = None
+ if os.path.exists(headfile):
+ rawdata = None
+ try:
+ with open(headfile, 'r') as f:
+ rawdata = f.readline()
+ except Exception:
+ if fail_on_error and module:
+ module.fail_json(msg="Unable to read %s" % headfile)
+ if rawdata:
+ try:
+ rawdata = rawdata.replace('refs/remotes/%s' % remote, '', 1)
+ refparts = rawdata.split(' ')
+ newref = refparts[-1]
+ nrefparts = newref.split('/', 2)
+ res = nrefparts[-1].rstrip('\n')
+ except Exception:
+ if fail_on_error and module:
+ module.fail_json(msg="Unable to split head from '%s'" % rawdata)
+ return res
+
+
+def unfrackgitpath(path):
+ if path is None:
+ return None
+
+ # copied from ansible.utils.path
+ return os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(path))))
+
+
+def get_submodule_update_params(module, git_path, cwd):
+ # or: git submodule [--quiet] update [--init] [-N|--no-fetch]
+ # [-f|--force] [--rebase] [--reference <repository>] [--merge]
+ # [--recursive] [--] [<path>...]
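+ #
+ # Illustrative parse of the usage line above: brackets and pipes are
+ # stripped, then every token starting with '--' is collected, yielding
+ # params such as ['quiet', 'init', 'no-fetch', 'force', 'rebase',
+ # 'reference', 'merge', 'recursive'].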
+
+ params = []
+
+ # run a bad submodule command to get valid params
+ cmd = "%s submodule update --help" % (git_path)
+ rc, stdout, stderr = module.run_command(cmd, cwd=cwd)
+ lines = stderr.split('\n')
+ update_line = None
+ for line in lines:
+ if 'git submodule [--quiet] update ' in line:
+ update_line = line
+ if update_line:
+ update_line = update_line.replace('[', '')
+ update_line = update_line.replace(']', '')
+ update_line = update_line.replace('|', ' ')
+ parts = shlex.split(update_line)
+ for part in parts:
+ if part.startswith('--'):
+ part = part.replace('--', '')
+ params.append(part)
+
+ return params
+
+
+def write_ssh_wrapper(module):
+ '''
+ This writes a shell wrapper for ssh options to be used with git.
+ This is only relevant for older versions of git that cannot
+ handle the options themselves. Returns the path to the script.
+ '''
+ try:
+ # make sure we have full permission to the module_dir, which
+ # may not be the case if we're sudo'ing to a non-root user
+ if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK):
+ fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/')
+ else:
+ raise OSError
+ except (IOError, OSError):
+ fd, wrapper_path = tempfile.mkstemp()
+
+ # use existing git_ssh/ssh_command, fallback to 'ssh'
+ template = b("""#!/bin/sh
+%s $GIT_SSH_OPTS "$@"
+""" % os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh')))
+
+ # write it
+ with os.fdopen(fd, 'w+b') as fh:
+ fh.write(template)
+
+ # set execute
+ st = os.stat(wrapper_path)
+ os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
+
+ module.debug('Wrote temp git ssh wrapper (%s): %s' % (wrapper_path, template))
+
+ # ensure we cleanup after ourselves
+ module.add_cleanup_file(path=wrapper_path)
+
+ return wrapper_path
+
+
+def set_git_ssh_env(key_file, ssh_opts, git_version, module):
+ '''
+ use environment variables to configure git's ssh execution,
+ which varies by version; this function should handle all of them.
+ '''
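+
+ # Illustrative outcome (assuming git >= 2.3.0, key_file='/k' and no
+ # pre-existing options): GIT_SSH_COMMAND ends up roughly as
+ # 'ssh -o BatchMode=yes -i /k -o IdentitiesOnly=yes'.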
+
+ # initialise to existing ssh opts and/or append user provided
+ if ssh_opts is None:
+ ssh_opts = os.environ.get('GIT_SSH_OPTS', '')
+ else:
+ ssh_opts = os.environ.get('GIT_SSH_OPTS', '') + ' ' + ssh_opts
+
+ # hostkey acceptance
+ accept_key = "StrictHostKeyChecking=no"
+ if module.params['accept_hostkey'] and accept_key not in ssh_opts:
+ ssh_opts += " -o %s" % accept_key
+
+ # avoid prompts
+ force_batch = 'BatchMode=yes'
+ if force_batch not in ssh_opts:
+ ssh_opts += ' -o %s' % (force_batch)
+
+ # deal with key file
+ if key_file:
+ key_opt = '-i %s' % key_file
+ if key_opt not in ssh_opts:
+ ssh_opts += ' %s' % key_opt
+
+ ikey = 'IdentitiesOnly=yes'
+ if ikey not in ssh_opts:
+ ssh_opts += ' -o %s' % ikey
+
+ # older than 2.3 does not know how to use git_ssh_command,
+ # so we force it into get_ssh var
+ # https://github.com/gitster/git/commit/09d60d785c68c8fa65094ecbe46fbc2a38d0fc1f
+ if git_version < LooseVersion('2.3.0'):
+ # for use in wrapper
+ os.environ["GIT_SSH_OPTS"] = ssh_opts
+
+ # these versions don't support GIT_SSH_OPTS so have to write wrapper
+ wrapper = write_ssh_wrapper(module)
+
+ # force use of git_ssh_opts via wrapper, git_ssh cannot handle arguments
+ os.environ['GIT_SSH'] = wrapper
+ else:
+ # we construct full finalized command string here
+ full_cmd = os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh'))
+ if ssh_opts:
+ full_cmd += ' ' + ssh_opts
+ # git_ssh_command can handle arguments to ssh
+ os.environ["GIT_SSH_COMMAND"] = full_cmd
+
+
+def get_version(module, git_path, dest, ref="HEAD"):
+ ''' samples the version of the git repo '''
+
+ cmd = "%s rev-parse %s" % (git_path, ref)
+ rc, stdout, stderr = module.run_command(cmd, cwd=dest)
+ sha = to_native(stdout).rstrip('\n')
+ return sha
+
+
+def ssh_supports_acceptnewhostkey(module):
+ try:
+ ssh_path = get_bin_path('ssh')
+ except ValueError as err:
+ module.fail_json(
+ msg='Remote host is missing ssh command, so you cannot '
+ 'use acceptnewhostkey option.', details=to_text(err))
+ supports_acceptnewhostkey = True
+ cmd = [ssh_path, '-o', 'StrictHostKeyChecking=accept-new', '-V']
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ supports_acceptnewhostkey = False
+ return supports_acceptnewhostkey
+
+
+def get_submodule_versions(git_path, module, dest, version='HEAD'):
+ cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(
+ msg='Unable to determine hashes of submodules',
+ stdout=out,
+ stderr=err,
+ rc=rc)
+ submodules = {}
+ subm_name = None
+ for line in out.splitlines():
+ if line.startswith("Entering '"):
+ subm_name = line[10:-1]
+ elif len(line.strip()) == 40:
+ if subm_name is None:
+ module.fail_json(msg='Unable to find submodule name for hash: %s' % line.strip())
+ submodules[subm_name] = line.strip()
+ subm_name = None
+ else:
+ module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
+ if subm_name is not None:
+ module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)
+
+ return submodules
+
+
+def clone(git_path, module, repo, dest, remote, depth, version, bare,
+ reference, refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch):
+ ''' makes a new git repo if it does not already exist '''
+ dest_dirname = os.path.dirname(dest)
+ try:
+ os.makedirs(dest_dirname)
+ except Exception:
+ pass
+ cmd = [git_path, 'clone']
+
+ if bare:
+ cmd.append('--bare')
+ else:
+ cmd.extend(['--origin', remote])
+
+ is_branch_or_tag = is_remote_branch(git_path, module, dest, repo, version) or is_remote_tag(git_path, module, dest, repo, version)
+ if depth:
+ if version == 'HEAD' or refspec:
+ cmd.extend(['--depth', str(depth)])
+ elif is_branch_or_tag:
+ cmd.extend(['--depth', str(depth)])
+ cmd.extend(['--branch', version])
+ else:
+ # only use depth if the remote object is branch or tag (i.e. fetchable)
+ module.warn("Ignoring depth argument. "
+ "Shallow clones are only available for "
+ "HEAD, branches, tags or in combination with refspec.")
+ if reference:
+ cmd.extend(['--reference', str(reference)])
+
+ if single_branch:
+ if git_version_used is None:
+ module.fail_json(msg='Cannot find git executable at %s' % git_path)
+
+ if git_version_used < LooseVersion('1.7.10'):
+ module.warn("git version '%s' is too old to use 'single-branch'. Ignoring." % git_version_used)
+ else:
+ cmd.append("--single-branch")
+
+ if is_branch_or_tag:
+ cmd.extend(['--branch', version])
+
+ needs_separate_git_dir_fallback = False
+ if separate_git_dir:
+ if git_version_used is None:
+ module.fail_json(msg='Cannot find git executable at %s' % git_path)
+ if git_version_used < LooseVersion('1.7.5'):
+ # git before 1.7.5 doesn't have separate-git-dir argument, do fallback
+ needs_separate_git_dir_fallback = True
+ else:
+ cmd.append('--separate-git-dir=%s' % separate_git_dir)
+
+ cmd.extend([repo, dest])
+ module.run_command(cmd, check_rc=True, cwd=dest_dirname)
+ if needs_separate_git_dir_fallback:
+ relocate_repo(module, result, separate_git_dir, os.path.join(dest, ".git"), dest)
+
+ if bare and remote != 'origin':
+ module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
+
+ if refspec:
+ cmd = [git_path, 'fetch']
+ if depth:
+ cmd.extend(['--depth', str(depth)])
+ cmd.extend([remote, refspec])
+ module.run_command(cmd, check_rc=True, cwd=dest)
+
+ if verify_commit:
+ verify_commit_sign(git_path, module, dest, version, gpg_whitelist)
+
+
+def has_local_mods(module, git_path, dest, bare):
+ if bare:
+ return False
+
+ cmd = "%s status --porcelain" % (git_path)
+ rc, stdout, stderr = module.run_command(cmd, cwd=dest)
+ lines = stdout.splitlines()
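+ # 'git status --porcelain' marks untracked files with '??'; those are
+ # filtered out below, so e.g. ' M file.txt' counts as a local mod while
+ # '?? new.txt' does not (illustrative output lines).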
+ lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
+
+ return len(lines) > 0
+
+
+def reset(git_path, module, dest):
+ '''
+ Resets the index and working tree to HEAD.
+ Discards any changes to tracked files in working
+ tree since that commit.
+ '''
+ cmd = "%s reset --hard HEAD" % (git_path,)
+ return module.run_command(cmd, check_rc=True, cwd=dest)
+
+
+def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after):
+ ''' Return the difference between 2 versions '''
+ if before is None:
+ return {'prepared': '>> Newly checked out %s' % after}
+ elif before != after:
+ # Ensure we have the object we are referring to during git diff !
+ git_version_used = git_version(git_path, module)
+ fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used)
+ cmd = '%s diff %s %s' % (git_path, before, after)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc == 0 and out:
+ return {'prepared': out}
+ elif rc == 0:
+ return {'prepared': '>> No visual differences between %s and %s' % (before, after)}
+ elif err:
+ return {'prepared': '>> Failed to get proper diff between %s and %s:\n>> %s' % (before, after, err)}
+ else:
+ return {'prepared': '>> Failed to get proper diff between %s and %s' % (before, after)}
+ return {}
+
+
+def get_remote_head(git_path, module, dest, version, remote, bare):
+ cloning = False
+ cwd = None
+ tag = False
+ if remote == module.params['repo']:
+ cloning = True
+ elif remote == 'file://' + os.path.expanduser(module.params['repo']):
+ cloning = True
+ else:
+ cwd = dest
+ if version == 'HEAD':
+ if cloning:
+ # cloning the repo, just get the remote's HEAD version
+ cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
+ else:
+ head_branch = get_head_branch(git_path, module, dest, remote, bare)
+ cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
+ elif is_remote_branch(git_path, module, dest, remote, version):
+ cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
+ elif is_remote_tag(git_path, module, dest, remote, version):
+ tag = True
+ cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
+ else:
+ # appears to be a sha1. return as-is since we cannot
+ # check for a specific sha1 on the remote
+ return version
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
+ if len(out) < 1:
+ module.fail_json(msg="Could not determine remote revision for %s" % version, stdout=out, stderr=err, rc=rc)
+
+ out = to_native(out)
+
+ if tag:
+ # Find the dereferenced tag if this is an annotated tag.
+ for tag in out.split('\n'):
+ if tag.endswith(version + '^{}'):
+ out = tag
+ break
+ elif tag.endswith(version):
+ out = tag
+
+ rev = out.split()[0]
+ return rev
+
+
+def is_remote_tag(git_path, module, dest, remote, version):
+ cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if to_native(version, errors='surrogate_or_strict') in out:
+ return True
+ else:
+ return False
+
+
+def get_branches(git_path, module, dest):
+ branches = []
+ cmd = '%s branch --no-color -a' % (git_path,)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Could not determine branch data - received %s" % out, stdout=out, stderr=err)
+ for line in out.split('\n'):
+ if line.strip():
+ branches.append(line.strip())
+ return branches
+
+
+def get_annotated_tags(git_path, module, dest):
+ tags = []
+ cmd = [git_path, 'for-each-ref', 'refs/tags/', '--format', '%(objecttype):%(refname:short)']
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Could not determine tag data - received %s" % out, stdout=out, stderr=err)
+ for line in to_native(out).split('\n'):
+ if line.strip():
+ tagtype, tagname = line.strip().split(':')
+ if tagtype == 'tag':
+ tags.append(tagname)
+ return tags
+
+
+def is_remote_branch(git_path, module, dest, remote, version):
+ cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if to_native(version, errors='surrogate_or_strict') in out:
+ return True
+ else:
+ return False
+
+
+def is_local_branch(git_path, module, dest, branch):
+ branches = get_branches(git_path, module, dest)
+ lbranch = '%s' % branch
+ if lbranch in branches:
+ return True
+ elif '* %s' % branch in branches:
+ return True
+ else:
+ return False
+
+
+def is_not_a_branch(git_path, module, dest):
+ branches = get_branches(git_path, module, dest)
+ for branch in branches:
+ if branch.startswith('* ') and ('no branch' in branch or 'detached from' in branch or 'detached at' in branch):
+ return True
+ return False
+
+
+def get_repo_path(dest, bare):
+ if bare:
+ repo_path = dest
+ else:
+ repo_path = os.path.join(dest, '.git')
+ # Check if .git is a file. If it is a file, the repository is in an external directory
+ # relative to the working copy (e.g. we are in a submodule structure).
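+ # An illustrative .git file body: 'gitdir: ../../.git/modules/mysub'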
+ if os.path.isfile(repo_path):
+ with open(repo_path, 'r') as gitfile:
+ data = gitfile.read()
+ ref_prefix, gitdir = data.rstrip().split('gitdir: ', 1)
+ if ref_prefix:
+ raise ValueError('.git file has invalid git dir reference format')
+
+ # The .git file may contain an absolute path.
+ if os.path.isabs(gitdir):
+ repo_path = gitdir
+ else:
+ # Use original destination directory with data from .git file.
+ repo_path = os.path.join(dest, gitdir)
+ if not os.path.isdir(repo_path):
+ raise ValueError('%s is not a directory' % repo_path)
+ return repo_path
+
+
+def get_head_branch(git_path, module, dest, remote, bare=False):
+ '''
+ Determine what branch HEAD is associated with. This is partly
+ taken from lib/ansible/utils/__init__.py. It finds the correct
+ path to .git/HEAD and reads from that file the branch that HEAD is
+ associated with. In the case of a detached HEAD, this will look
+ up the branch in .git/refs/remotes/<remote>/HEAD.
+ '''
+ try:
+ repo_path = get_repo_path(dest, bare)
+ except (IOError, ValueError) as err:
+ # No repo path found
+ """``.git`` file does not have a valid format for detached Git dir."""
+ module.fail_json(
+ msg='Current repo does not have a valid reference to a '
+ 'separate Git dir or it refers to the invalid path',
+ details=to_text(err),
+ )
+ # Read .git/HEAD for the name of the branch.
+ # If we're in a detached HEAD state, look up the branch associated with
+ # the remote HEAD in .git/refs/remotes/<remote>/HEAD
+ headfile = os.path.join(repo_path, "HEAD")
+ if is_not_a_branch(git_path, module, dest):
+ headfile = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')
+ branch = head_splitter(headfile, remote, module=module, fail_on_error=True)
+ return branch
+
+
+def get_remote_url(git_path, module, dest, remote):
+ '''Return URL of remote source for repo.'''
+ command = [git_path, 'ls-remote', '--get-url', remote]
+ (rc, out, err) = module.run_command(command, cwd=dest)
+ if rc != 0:
+ # There was an issue getting remote URL, most likely
+ # command is not available in this version of Git.
+ return None
+ return to_native(out).rstrip('\n')
+
+
+def set_remote_url(git_path, module, repo, dest, remote):
+ ''' updates repo from remote sources '''
+ # Return if remote URL isn't changing.
+ remote_url = get_remote_url(git_path, module, dest, remote)
+ if remote_url == repo or unfrackgitpath(remote_url) == unfrackgitpath(repo):
+ return False
+
+ command = [git_path, 'remote', 'set-url', remote, repo]
+ (rc, out, err) = module.run_command(command, cwd=dest)
+ if rc != 0:
+ label = "set a new url %s for %s" % (repo, remote)
+ module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
+
+ # Return False if remote_url is None to maintain previous behavior
+ # for Git versions prior to 1.7.5 that lack required functionality.
+ return remote_url is not None
+
+
+def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=False):
+ ''' updates repo from remote sources '''
+ set_remote_url(git_path, module, repo, dest, remote)
+ commands = []
+
+ fetch_str = 'download remote objects and refs'
+ fetch_cmd = [git_path, 'fetch']
+
+ refspecs = []
+ if depth:
+ # try to find the minimal set of refs we need to fetch to get a
+ # successful checkout
+ currenthead = get_head_branch(git_path, module, dest, remote)
+ if refspec:
+ refspecs.append(refspec)
+ elif version == 'HEAD':
+ refspecs.append(currenthead)
+ elif is_remote_branch(git_path, module, dest, repo, version):
+ if currenthead != version:
+ # this workaround is only needed for older git versions
+ # 1.8.3 is broken, 1.9.x works
+ # ensure that remote branch is available as both local and remote ref
+ refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version))
+ refspecs.append('+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version))
+ elif is_remote_tag(git_path, module, dest, repo, version):
+ refspecs.append('+refs/tags/' + version + ':refs/tags/' + version)
+ if refspecs:
+ # if refspecs is empty, i.e. version is neither heads nor tags
+ # assume it is a version hash
+ # fall back to a full clone, otherwise we might not be able to checkout
+ # version
+ fetch_cmd.extend(['--depth', str(depth)])
+
+ if not depth or not refspecs:
+ # don't try to be minimalistic but do a full clone
+ # also do this if depth is given, but version is something that can't be fetched directly
+ if bare:
+ refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
+ else:
+ # ensure all tags are fetched
+ if git_version_used >= LooseVersion('1.9'):
+ fetch_cmd.append('--tags')
+ else:
+ # old git versions have a bug in --tags that prevents updating existing tags
+ commands.append((fetch_str, fetch_cmd + [remote]))
+ refspecs = ['+refs/tags/*:refs/tags/*']
+ if refspec:
+ refspecs.append(refspec)
+
+ if force:
+ fetch_cmd.append('--force')
+
+ fetch_cmd.extend([remote])
+
+ commands.append((fetch_str, fetch_cmd + refspecs))
+
+ for (label, command) in commands:
+ (rc, out, err) = module.run_command(command, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to %s: %s %s" % (label, out, err), cmd=command)
+
+
+def submodules_fetch(git_path, module, remote, track_submodules, dest):
+ changed = False
+
+ if not os.path.exists(os.path.join(dest, '.gitmodules')):
+ # no submodules
+ return changed
+
+ with open(os.path.join(dest, '.gitmodules'), 'r') as gitmodules_file:
+ for line in gitmodules_file:
+ # Check for new submodules
+ if not changed and line.strip().startswith('path'):
+ path = line.split('=', 1)[1].strip()
+ # Check that dest/path/.git exists
+ if not os.path.exists(os.path.join(dest, path, '.git')):
+ changed = True
+
+ # Check for updates to existing modules
+ if not changed:
+ # Fetch updates
+ begin = get_submodule_versions(git_path, module, dest)
+ cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to fetch submodules: %s" % out + err)
+
+ if track_submodules:
+ # Compare against submodule HEAD
+ # FIXME: determine this from .gitmodules
+ version = 'master'
+ after = get_submodule_versions(git_path, module, dest, '%s/%s' % (remote, version))
+ if begin != after:
+ changed = True
+ else:
+ # Compare against the superproject's expectation
+ cmd = [git_path, 'submodule', 'status']
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg='Failed to retrieve submodule status: %s' % (out + err))
+ for line in out.splitlines():
+ if line[0] != ' ':
+ changed = True
+ break
+ return changed
+
+
+def submodule_update(git_path, module, dest, track_submodules, force=False):
+ ''' init and update any submodules '''
+
+ # get the valid submodule params
+ params = get_submodule_update_params(module, git_path, dest)
+
+ # skip submodule commands if .gitmodules is not present
+ if not os.path.exists(os.path.join(dest, '.gitmodules')):
+ return (0, '', '')
+ cmd = [git_path, 'submodule', 'sync']
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if 'remote' in params and track_submodules:
+ cmd = [git_path, 'submodule', 'update', '--init', '--recursive', '--remote']
+ else:
+ cmd = [git_path, 'submodule', 'update', '--init', '--recursive']
+ if force:
+ cmd.append('--force')
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
+ return (rc, out, err)
+
+
+def set_remote_branch(git_path, module, dest, remote, version, depth):
+ """set refs for the remote branch version
+
+ This assumes the branch does not yet exist locally and is therefore also not checked out.
+ Can't use git remote set-branches, as it is not available in git 1.7.1 (centos6)
+ """
+
+ branchref = "+refs/heads/%s:refs/heads/%s" % (version, version)
+ branchref += ' +refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version)
+ cmd = "%s fetch --depth=%s %s %s" % (git_path, depth, remote, branchref)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc)
+
+
+def switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist):
+ cmd = ''
+ if version == 'HEAD':
+ branch = get_head_branch(git_path, module, dest, remote)
+ (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to checkout branch %s" % branch,
+ stdout=out, stderr=err, rc=rc)
+ cmd = "%s reset --hard %s/%s --" % (git_path, remote, branch)
+ else:
+ # FIXME check for local_branch first, should have been fetched already
+ if is_remote_branch(git_path, module, dest, remote, version):
+ if depth and not is_local_branch(git_path, module, dest, version):
+ # git clone --depth implies --single-branch, which makes
+ # the checkout fail if the version changes
+ # fetch the remote branch, to be able to check it out next
+ set_remote_branch(git_path, module, dest, remote, version, depth)
+ if not is_local_branch(git_path, module, dest, version):
+ cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
+ else:
+ (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to checkout branch %s" % version, stdout=out, stderr=err, rc=rc)
+ cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
+ else:
+ cmd = "%s checkout --force %s" % (git_path, version)
+ (rc, out1, err1) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ if version != 'HEAD':
+ module.fail_json(msg="Failed to checkout %s" % (version),
+ stdout=out1, stderr=err1, rc=rc, cmd=cmd)
+ else:
+ module.fail_json(msg="Failed to checkout branch %s" % (branch),
+ stdout=out1, stderr=err1, rc=rc, cmd=cmd)
+
+ if verify_commit:
+ verify_commit_sign(git_path, module, dest, version, gpg_whitelist)
+
+ return (rc, out1, err1)
+
+
+def verify_commit_sign(git_path, module, dest, version, gpg_whitelist):
+ if version in get_annotated_tags(git_path, module, dest):
+ git_sub = "verify-tag"
+ else:
+ git_sub = "verify-commit"
+ cmd = "%s %s %s" % (git_path, git_sub, version)
+ if gpg_whitelist:
+ cmd += " --raw"
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
+ if gpg_whitelist:
+ fingerprint = get_gpg_fingerprint(err)
+ if fingerprint not in gpg_whitelist:
+ module.fail_json(msg='The gpg_whitelist does not include the public key "%s" for this commit' % fingerprint, stdout=out, stderr=err, rc=rc)
+ return (rc, out, err)
+
+
+def get_gpg_fingerprint(output):
+ """Return a fingerprint of the primary key.
+
+ Ref:
+ https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;hb=HEAD#l482
+ """
+ for line in output.splitlines():
+ data = line.split()
+ if data[1] != 'VALIDSIG':
+ continue
+
+ # if signed with a subkey, this contains the primary key fingerprint
+ data_id = 11 if len(data) == 11 else 2
+ return data[data_id]
+
+
+def git_version(git_path, module):
+ """return the installed version of git"""
+ cmd = "%s --version" % git_path
+ (rc, out, err) = module.run_command(cmd)
+ if rc != 0:
+ # one could fail_json here, but the version info is not that important,
+ # so let's try to fail only on actual git commands
+ return None
+ rematch = re.search('git version (.*)$', to_native(out))
+ if not rematch:
+ return None
+ return LooseVersion(rematch.groups()[0])
+
+
+def git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version):
+ """ Create git archive in given source directory """
+ cmd = [git_path, 'archive', '--format', archive_fmt, '--output', archive, version]
+ if archive_prefix is not None:
+ cmd.insert(-1, '--prefix')
+ cmd.insert(-1, archive_prefix)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to perform archive operation",
+ details="Git archive command failed to create "
+ "archive %s using %s directory."
+ "Error: %s" % (archive, dest, err))
+ return rc, out, err
+
+
+def create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result):
+ """ Helper function for creating archive using git_archive """
+ all_archive_fmt = {'.zip': 'zip', '.gz': 'tar.gz', '.tar': 'tar',
+ '.tgz': 'tgz'}
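+ # Note: os.path.splitext('repo.tar.gz') yields '.gz', which is why '.gz'
+ # maps to the 'tar.gz' archive format above (illustrative filename).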
+ _, archive_ext = os.path.splitext(archive)
+ archive_fmt = all_archive_fmt.get(archive_ext, None)
+ if archive_fmt is None:
+ module.fail_json(msg="Unable to get file extension from "
+ "archive file name : %s" % archive,
+ details="Please specify archive as filename with "
+ "extension. File extension can be one "
+ "of ['tar', 'tar.gz', 'zip', 'tgz']")
+
+ repo_name = repo.split("/")[-1].replace(".git", "")
+
+ if os.path.exists(archive):
+ # If a git archive file already exists, compare it with the newly created one:
+ # if they match, do nothing;
+ # if they do not match, replace the existing archive with the new one.
+ tempdir = tempfile.mkdtemp()
+ new_archive_dest = os.path.join(tempdir, repo_name)
+ new_archive = new_archive_dest + '.' + archive_fmt
+ git_archive(git_path, module, dest, new_archive, archive_fmt, archive_prefix, version)
+
+ # filecmp is supposed to be more efficient than an md5sum checksum
+ if filecmp.cmp(new_archive, archive):
+ result.update(changed=False)
+ # Cleanup before exiting
+ try:
+ shutil.rmtree(tempdir)
+ except OSError:
+ pass
+ else:
+ try:
+ shutil.move(new_archive, archive)
+ shutil.rmtree(tempdir)
+ result.update(changed=True)
+ except OSError as e:
+ module.fail_json(msg="Failed to move %s to %s" %
+ (new_archive, archive),
+ details=u"Error occurred while moving : %s"
+ % to_text(e))
+ else:
+ # Perform archive from local directory
+ git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version)
+ result.update(changed=True)
+
+# ===========================================
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path'),
+ repo=dict(required=True, aliases=['name']),
+ version=dict(default='HEAD'),
+ remote=dict(default='origin'),
+ refspec=dict(default=None),
+ reference=dict(default=None),
+ force=dict(default='no', type='bool'),
+ depth=dict(default=None, type='int'),
+ clone=dict(default='yes', type='bool'),
+ update=dict(default='yes', type='bool'),
+ verify_commit=dict(default='no', type='bool'),
+ gpg_whitelist=dict(default=[], type='list', elements='str'),
+ accept_hostkey=dict(default='no', type='bool'),
+ accept_newhostkey=dict(default='no', type='bool'),
+ key_file=dict(default=None, type='path', required=False),
+ ssh_opts=dict(default=None, required=False),
+ executable=dict(default=None, type='path'),
+ bare=dict(default='no', type='bool'),
+ recursive=dict(default='yes', type='bool'),
+ single_branch=dict(default=False, type='bool'),
+ track_submodules=dict(default='no', type='bool'),
+ umask=dict(default=None, type='raw'),
+ archive=dict(type='path'),
+ archive_prefix=dict(),
+ separate_git_dir=dict(type='path'),
+ ),
+ mutually_exclusive=[('separate_git_dir', 'bare'), ('accept_hostkey', 'accept_newhostkey')],
+ required_by={'archive_prefix': ['archive']},
+ supports_check_mode=True
+ )
+
+ dest = module.params['dest']
+ repo = module.params['repo']
+ version = module.params['version']
+ remote = module.params['remote']
+ refspec = module.params['refspec']
+ force = module.params['force']
+ depth = module.params['depth']
+ update = module.params['update']
+ allow_clone = module.params['clone']
+ bare = module.params['bare']
+ verify_commit = module.params['verify_commit']
+ gpg_whitelist = module.params['gpg_whitelist']
+ reference = module.params['reference']
+ single_branch = module.params['single_branch']
+ git_path = module.params['executable'] or module.get_bin_path('git', True)
+ key_file = module.params['key_file']
+ ssh_opts = module.params['ssh_opts']
+ umask = module.params['umask']
+ archive = module.params['archive']
+ archive_prefix = module.params['archive_prefix']
+ separate_git_dir = module.params['separate_git_dir']
+
+ result = dict(changed=False, warnings=list())
+
+ if module.params['accept_hostkey']:
+ if ssh_opts is not None:
+ if ("-o StrictHostKeyChecking=no" not in ssh_opts) and ("-o StrictHostKeyChecking=accept-new" not in ssh_opts):
+ ssh_opts += " -o StrictHostKeyChecking=no"
+ else:
+ ssh_opts = "-o StrictHostKeyChecking=no"
+
+ if module.params['accept_newhostkey']:
+ if not ssh_supports_acceptnewhostkey(module):
+ module.warn("Your ssh client does not support accept_newhostkey option, therefore it cannot be used.")
+ else:
+ if ssh_opts is not None:
+ if ("-o StrictHostKeyChecking=no" not in ssh_opts) and ("-o StrictHostKeyChecking=accept-new" not in ssh_opts):
+ ssh_opts += " -o StrictHostKeyChecking=accept-new"
+ else:
+ ssh_opts = "-o StrictHostKeyChecking=accept-new"
+
+ # evaluate and set the umask before doing anything else
+ if umask is not None:
+ if not isinstance(umask, string_types):
+ module.fail_json(msg="umask must be defined as a quoted octal integer")
+ try:
+ umask = int(umask, 8)
+ except Exception:
+ module.fail_json(msg="umask must be an octal integer",
+ details=to_text(sys.exc_info()[1]))
+ os.umask(umask)
+
+ # Certain features such as depth require a file:/// protocol for path based urls
+ # so force a protocol here ...
+ if os.path.expanduser(repo).startswith('/'):
+ repo = 'file://' + os.path.expanduser(repo)
+
+ # We screenscrape a huge amount of git commands so use C locale anytime we
+ # call run_command()
+ locale = get_best_parsable_locale(module)
+ module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale)
+
+ if separate_git_dir:
+ separate_git_dir = os.path.realpath(separate_git_dir)
+
+ gitconfig = None
+ if not dest and allow_clone:
+ module.fail_json(msg="the destination directory must be specified unless clone=no")
+ elif dest:
+ dest = os.path.abspath(dest)
+ try:
+ repo_path = get_repo_path(dest, bare)
+ if separate_git_dir and os.path.exists(repo_path) and separate_git_dir != repo_path:
+ result['changed'] = True
+ if not module.check_mode:
+ relocate_repo(module, result, separate_git_dir, repo_path, dest)
+ repo_path = separate_git_dir
+ except (IOError, ValueError) as err:
+ # No repo path found
+ """``.git`` file does not have a valid format for detached Git dir."""
+ module.fail_json(
+ msg='Current repo does not have a valid reference to a '
+ 'separate Git dir or it refers to the invalid path',
+ details=to_text(err),
+ )
+ gitconfig = os.path.join(repo_path, 'config')
+
+ # the git CLI interface changes between versions, so we need the version in use to make decisions
+ git_version_used = git_version(git_path, module)
+
+ # configure SSH via environment variables; this may create an sh wrapper script for older git versions (GIT_SSH=<path>).
+ set_git_ssh_env(key_file, ssh_opts, git_version_used, module)
+
+ if depth is not None and git_version_used < LooseVersion('1.9.1'):
+ module.warn("git version is too old to fully support the depth argument. Falling back to full checkouts.")
+ depth = None
+
+ recursive = module.params['recursive']
+ track_submodules = module.params['track_submodules']
+
+ result.update(before=None)
+
+ local_mods = False
+ if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
+ # if there is no git configuration, do a clone operation unless:
+ # * the user requested no clone (they just want info)
+ # * we're doing a check mode test
+ # In those cases we do an ls-remote
+ if module.check_mode or not allow_clone:
+ remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
+ result.update(changed=True, after=remote_head)
+ if module._diff:
+ diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
+ if diff:
+ result['diff'] = diff
+ module.exit_json(**result)
+ # there's no git config, so clone
+ clone(git_path, module, repo, dest, remote, depth, version, bare, reference,
+ refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch)
+ elif not update:
+ # Just return having found a repo already in the dest path;
+ # this does no checking that the repo is the actual repo
+ # requested.
+ result['before'] = get_version(module, git_path, dest)
+ result.update(after=result['before'])
+ if archive:
+ # Git archive is not supported by all git servers, so
+ # we will first clone and perform git archive from local directory
+ if module.check_mode:
+ result.update(changed=True)
+ module.exit_json(**result)
+
+ create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)
+
+ module.exit_json(**result)
+ else:
+ # else do a pull
+ local_mods = has_local_mods(module, git_path, dest, bare)
+ result['before'] = get_version(module, git_path, dest)
+ if local_mods:
+ # failure should happen regardless of check mode
+ if not force:
+ module.fail_json(msg="Local modifications exist in the destination: " + dest + " (force=no).", **result)
+ # if force and in non-check mode, do a reset
+ if not module.check_mode:
+ reset(git_path, module, dest)
+ result.update(changed=True, msg='Local modifications exist in the destination: ' + dest)
+
+ # exit if already at desired sha version
+ if module.check_mode:
+ remote_url = get_remote_url(git_path, module, dest, remote)
+ remote_url_changed = remote_url and remote_url != repo and unfrackgitpath(remote_url) != unfrackgitpath(repo)
+ else:
+ remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
+ result.update(remote_url_changed=remote_url_changed)
+
+ if module.check_mode:
+ remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
+ result.update(changed=(result['before'] != remote_head or remote_url_changed), after=remote_head)
+ # FIXME: This diff should fail since the new remote_head is not fetched yet?!
+ if module._diff:
+ diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
+ if diff:
+ result['diff'] = diff
+ module.exit_json(**result)
+ else:
+ fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=force)
+
+ result['after'] = get_version(module, git_path, dest)
+
+ # switch to version specified regardless of whether
+ # we got new revisions from the repository
+ if not bare:
+ switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist)
+
+ # Deal with submodules
+ submodules_updated = False
+ if recursive and not bare:
+ submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
+ if submodules_updated:
+ result.update(submodules_changed=submodules_updated)
+
+ if module.check_mode:
+ result.update(changed=True, after=remote_head)
+ module.exit_json(**result)
+
+ # Switch to version specified
+ submodule_update(git_path, module, dest, track_submodules, force=force)
+
+ # determine if we changed anything
+ result['after'] = get_version(module, git_path, dest)
+
+ if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
+ result.update(changed=True)
+ if module._diff:
+ diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
+ if diff:
+ result['diff'] = diff
+
+ if archive:
+ # Git archive is not supported by all git servers, so
+ # we will first clone and perform git archive from local directory
+ if module.check_mode:
+ result.update(changed=True)
+ module.exit_json(**result)
+
+ create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/group.py b/lib/ansible/modules/group.py
new file mode 100644
index 0000000..109a161
--- /dev/null
+++ b/lib/ansible/modules/group.py
@@ -0,0 +1,662 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: group
+version_added: "0.0.2"
+short_description: Add or remove groups
+requirements:
+- groupadd
+- groupdel
+- groupmod
+description:
+ - Manage presence of groups on a host.
+ - For Windows targets, use the M(ansible.windows.win_group) module instead.
+options:
+ name:
+ description:
+ - Name of the group to manage.
+ type: str
+ required: true
+ gid:
+ description:
+ - Optional I(GID) to set for the group.
+ type: int
+ state:
+ description:
+ - Whether the group should be present or not on the remote host.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ system:
+ description:
+ - If I(yes), indicates that the group created is a system group.
+ type: bool
+ default: no
+ local:
+ description:
+ - Forces the use of "local" command alternatives on platforms that implement it.
+ - This is useful in environments that use centralized authentication when you want to manipulate the local groups
+ (for example, it uses C(lgroupadd) instead of C(groupadd)).
+ - This requires that these commands exist on the targeted host, otherwise it will be a fatal error.
+ type: bool
+ default: no
+ version_added: "2.6"
+ non_unique:
+ description:
+ - This option allows changing the group ID to a non-unique value. Requires C(gid).
+ - Not supported on macOS or BusyBox distributions.
+ type: bool
+ default: no
+ version_added: "2.8"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+- module: ansible.builtin.user
+- module: ansible.windows.win_group
+author:
+- Stephen Fromm (@sfromm)
+'''
+
+EXAMPLES = '''
+- name: Ensure group "somegroup" exists
+ ansible.builtin.group:
+ name: somegroup
+ state: present
+
+- name: Ensure group "docker" exists with correct gid
+ ansible.builtin.group:
+ name: docker
+ state: present
+ gid: 1750
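+
+# Illustrative sketches (the group names below are placeholders):
+- name: Ensure group "tempgroup" is absent
+ ansible.builtin.group:
+ name: tempgroup
+ state: absent
+
+- name: Create the system group "monitoring"
+ ansible.builtin.group:
+ name: monitoring
+ system: yes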
+'''
+
+RETURN = r'''
+gid:
+ description: Group ID of the group.
+ returned: When C(state) is 'present'
+ type: int
+ sample: 1001
+name:
+ description: Group name.
+ returned: always
+ type: str
+ sample: users
+state:
+ description: Whether the group is present or not.
+ returned: always
+ type: str
+ sample: 'absent'
+system:
+ description: Whether the group is a system group or not.
+ returned: When C(state) is 'present'
+ type: bool
+ sample: False
+'''
+
+import grp
+import os
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.sys_info import get_platform_subclass
+
+
+class Group(object):
+ """
+ This is a generic Group manipulation class that is subclassed
+ based on platform.
+
+ A subclass may wish to override the following action methods:-
+ - group_del()
+ - group_add()
+ - group_mod()
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None # type: str | None
+ GROUPFILE = '/etc/group'
+
+ def __new__(cls, *args, **kwargs):
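+ # get_platform_subclass picks the most specific Group subclass whose
+ # 'platform'/'distribution' attributes match the target host, so
+ # Group(module) transparently returns e.g. an AlpineGroup instance.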
+ new_cls = get_platform_subclass(Group)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.name = module.params['name']
+ self.gid = module.params['gid']
+ self.system = module.params['system']
+ self.local = module.params['local']
+ self.non_unique = module.params['non_unique']
+
+ def execute_command(self, cmd):
+ return self.module.run_command(cmd)
+
+ def group_del(self):
+ if self.local:
+ command_name = 'lgroupdel'
+ else:
+ command_name = 'groupdel'
+ cmd = [self.module.get_bin_path(command_name, True), self.name]
+ return self.execute_command(cmd)
+
+ def _local_check_gid_exists(self):
+ if self.gid:
+ for gr in grp.getgrall():
+ if self.gid == gr.gr_gid and self.name != gr.gr_name:
+ self.module.fail_json(msg="GID '{0}' already exists with group '{1}'".format(self.gid, gr.gr_name))
+
+ def group_add(self, **kwargs):
+ if self.local:
+ command_name = 'lgroupadd'
+ self._local_check_gid_exists()
+ else:
+ command_name = 'groupadd'
+ cmd = [self.module.get_bin_path(command_name, True)]
+ for key in kwargs:
+ if key == 'gid' and kwargs[key] is not None:
+ cmd.append('-g')
+ cmd.append(str(kwargs[key]))
+ if self.non_unique:
+ cmd.append('-o')
+ elif key == 'system' and kwargs[key] is True:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ if self.local:
+ command_name = 'lgroupmod'
+ self._local_check_gid_exists()
+ else:
+ command_name = 'groupmod'
+ cmd = [self.module.get_bin_path(command_name, True)]
+ info = self.group_info()
+ for key in kwargs:
+ if key == 'gid':
+ if kwargs[key] is not None and info[2] != int(kwargs[key]):
+ cmd.append('-g')
+ cmd.append(str(kwargs[key]))
+ if self.non_unique:
+ cmd.append('-o')
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_exists(self):
+ # The grp module does not distinguish between local and directory accounts.
+ # Its output cannot be used to determine whether or not a group exists locally.
+ # It returns True if the group exists locally or in the directory, so instead
+ # look in the local GROUP file for an existing account.
+ if self.local:
+ if not os.path.exists(self.GROUPFILE):
+ self.module.fail_json(msg="'local: true' specified but unable to find local group file {0} to parse.".format(self.GROUPFILE))
+
+ exists = False
+ name_test = '{0}:'.format(self.name)
+ with open(self.GROUPFILE, 'rb') as f:
+ reversed_lines = f.readlines()[::-1]
+ for line in reversed_lines:
+ if line.startswith(to_bytes(name_test)):
+ exists = True
+ break
+
+ if not exists:
+ self.module.warn(
+ "'local: true' specified and group was not found in {file}. "
+ "The local group may already exist if the local group database exists somewhere other than {file}.".format(file=self.GROUPFILE))
+
+ return exists
+
+ else:
+ try:
+ if grp.getgrnam(self.name):
+ return True
+ except KeyError:
+ return False
+
+ def group_info(self):
+ if not self.group_exists():
+ return False
+ try:
+ info = list(grp.getgrnam(self.name))
+ except KeyError:
+ return False
+ return info
+
+
+# ===========================================
+
+class SunOS(Group):
+ """
+ This is a SunOS Group manipulation class. Solaris doesn't have
+ the 'system' group concept.
+
+ This overrides the following methods from the generic class:-
+ - group_add()
+ """
+
+ platform = 'SunOS'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupadd', True)]
+ for key in kwargs:
+ if key == 'gid' and kwargs[key] is not None:
+ cmd.append('-g')
+ cmd.append(str(kwargs[key]))
+ if self.non_unique:
+ cmd.append('-o')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+class AIX(Group):
+ """
+ This is an AIX Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'AIX'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('rmgroup', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('mkgroup', True)]
+ for key in kwargs:
+ if key == 'gid' and kwargs[key] is not None:
+ cmd.append('id=' + str(kwargs[key]))
+ elif key == 'system' and kwargs[key] is True:
+ cmd.append('-a')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('chgroup', True)]
+ info = self.group_info()
+ for key in kwargs:
+ if key == 'gid':
+ if kwargs[key] is not None and info[2] != int(kwargs[key]):
+ cmd.append('id=' + str(kwargs[key]))
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+class FreeBsdGroup(Group):
+ """
+ This is a FreeBSD Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'FreeBSD'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
+ if self.gid is not None:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name]
+ info = self.group_info()
+ cmd_len = len(cmd)
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ # modify the group if cmd will do anything
+ if cmd_len != len(cmd):
+ if self.module.check_mode:
+ return (0, '', '')
+ return self.execute_command(cmd)
+ return (None, '', '')
+
+
+class DragonFlyBsdGroup(FreeBsdGroup):
+ """
+ This is a DragonFlyBSD Group manipulation class.
+ It inherits all behaviors from the FreeBsdGroup class.
+ """
+
+ platform = 'DragonFly'
+
+
+# ===========================================
+
+class DarwinGroup(Group):
+ """
+ This is a macOS (Darwin) Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+
+ Group manipulation is done using dseditgroup(1).
+ """
+
+ platform = 'Darwin'
+ distribution = None
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('dseditgroup', True)]
+ cmd += ['-o', 'create']
+ if self.gid is not None:
+ cmd += ['-i', str(self.gid)]
+ elif 'system' in kwargs and kwargs['system'] is True:
+ gid = self.get_lowest_available_system_gid()
+ if gid is not False:
+ self.gid = str(gid)
+ cmd += ['-i', str(self.gid)]
+ cmd += ['-L', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ return (rc, out, err)
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('dseditgroup', True)]
+ cmd += ['-o', 'delete']
+ cmd += ['-L', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ return (rc, out, err)
+
+ def group_mod(self, gid=None):
+ info = self.group_info()
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd = [self.module.get_bin_path('dseditgroup', True)]
+ cmd += ['-o', 'edit']
+ if gid is not None:
+ cmd += ['-i', str(gid)]
+ cmd += ['-L', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ return (rc, out, err)
+ return (None, '', '')
+
+ def get_lowest_available_system_gid(self):
+ # check for lowest available system gid (< 500)
+ try:
+ cmd = [self.module.get_bin_path('dscl', True)]
+ cmd += ['/Local/Default', '-list', '/Groups', 'PrimaryGroupID']
+ (rc, out, err) = self.execute_command(cmd)
+ lines = out.splitlines()
+ highest = 0
+ for group_info in lines:
+ parts = group_info.split(' ')
+ if len(parts) > 1:
+ gid = int(parts[-1])
+ if gid > highest and gid < 500:
+ highest = gid
+ if highest == 0 or highest == 499:
+ return False
+ return (highest + 1)
+ except Exception:
+ return False
+
+
+class OpenBsdGroup(Group):
+ """
+ This is an OpenBSD Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'OpenBSD'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('groupdel', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupadd', True)]
+ if self.gid is not None:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupmod', True)]
+ info = self.group_info()
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+class NetBsdGroup(Group):
+ """
+ This is a NetBSD Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'NetBSD'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('groupdel', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupadd', True)]
+ if self.gid is not None:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupmod', True)]
+ info = self.group_info()
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+
+class BusyBoxGroup(Group):
+ """
+ BusyBox group manipulation class for systems that have addgroup and delgroup.
+
+ It overrides the following methods:
+ - group_add()
+ - group_del()
+ - group_mod()
+ """
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('addgroup', True)]
+ if self.gid is not None:
+ cmd.extend(['-g', str(self.gid)])
+
+ if self.system:
+ cmd.append('-S')
+
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('delgroup', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ # Since there is no groupmod command, modify /etc/group directly
+ info = self.group_info()
+ if self.gid is not None and self.gid != info[2]:
+ with open('/etc/group', 'rb') as f:
+ b_groups = f.read()
+
+ b_name = to_bytes(self.name)
+ b_current_group_string = b'%s:x:%d:' % (b_name, info[2])
+ b_new_group_string = b'%s:x:%d:' % (b_name, self.gid)
+
+ if b':%d:' % self.gid in b_groups:
+ self.module.fail_json(msg="gid '{gid}' in use".format(gid=self.gid))
+
+ if self.module.check_mode:
+ return 0, '', ''
+ b_new_groups = b_groups.replace(b_current_group_string, b_new_group_string)
+ with open('/etc/group', 'wb') as f:
+ f.write(b_new_groups)
+ return 0, '', ''
+
+ return None, '', ''
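+ # Sketch of the edit above: changing a hypothetical group 'admins' from
+ # gid 1000 to 1001 rewrites the /etc/group line b'admins:x:1000:' to
+ # b'admins:x:1001:'; every other line is left byte-for-byte intact.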
+
+
+class AlpineGroup(BusyBoxGroup):
+
+ platform = 'Linux'
+ distribution = 'Alpine'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True),
+ gid=dict(type='int'),
+ system=dict(type='bool', default=False),
+ local=dict(type='bool', default=False),
+ non_unique=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ['non_unique', True, ['gid']],
+ ],
+ )
+
+ group = Group(module)
+
+ module.debug('Group instantiated - platform %s' % group.platform)
+ if group.distribution:
+ module.debug('Group instantiated - distribution %s' % group.distribution)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = group.name
+ result['state'] = group.state
+
+ if group.state == 'absent':
+
+ if group.group_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = group.group_del()
+ if rc != 0:
+ module.fail_json(name=group.name, msg=err)
+
+ elif group.state == 'present':
+
+ if not group.group_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = group.group_add(gid=group.gid, system=group.system)
+ else:
+ (rc, out, err) = group.group_mod(gid=group.gid)
+
+ if rc is not None and rc != 0:
+ module.fail_json(name=group.name, msg=err)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ if group.group_exists():
+ info = group.group_info()
+ result['system'] = group.system
+ result['gid'] = info[2]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/group_by.py b/lib/ansible/modules/group_by.py
new file mode 100644
index 0000000..ef641f2
--- /dev/null
+++ b/lib/ansible/modules/group_by.py
@@ -0,0 +1,89 @@
+# -*- mode: python -*-
+
+# Copyright: (c) 2012, Jeroen Hoekx (@jhoekx)
+# Copyright: Ansible Team
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: group_by
+short_description: Create Ansible groups based on facts
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+description:
+- Use facts to create ad-hoc groups that can be used later in a playbook.
+- This module is also supported for Windows targets.
+version_added: "0.9"
+options:
+ key:
+ description:
+ - The variables whose values will be used as groups.
+ type: str
+ required: true
+ parents:
+ description:
+ - The list of the parent groups.
+ type: list
+ elements: str
+ default: all
+ version_added: "2.4"
+attributes:
+ action:
+ support: full
+ become:
+ support: none
+ bypass_host_loop:
+ support: full
+ bypass_task_loop:
+ support: none
+ check_mode:
+ details: While this makes no changes to target systems, the 'in memory' inventory will still be altered
+ support: partial
+ core:
+ details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
+ support: partial
+ connection:
+ support: none
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+notes:
+- Spaces in group names are converted to dashes '-'.
+- Though this module does not change the remote host,
+ we do provide 'changed' status as it can be useful
+ for those trying to track inventory changes.
+seealso:
+- module: ansible.builtin.add_host
+author:
+- Jeroen Hoekx (@jhoekx)
+'''
+
+EXAMPLES = r'''
+- name: Create groups based on the machine architecture
+ ansible.builtin.group_by:
+ key: machine_{{ ansible_machine }}
+
+- name: Create groups like 'virt_kvm_host'
+ ansible.builtin.group_by:
+ key: virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
+
+- name: Create nested groups
+ ansible.builtin.group_by:
+ key: el{{ ansible_distribution_major_version }}-{{ ansible_architecture }}
+ parents:
+ - el{{ ansible_distribution_major_version }}
+
+- name: Add all active hosts to a static group
+ ansible.builtin.group_by:
+ key: done
+'''
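+# Illustrative note: with ansible_machine == 'x86_64', the first example
+# above puts the host into a group named 'machine_x86_64'; 'parents'
+# nests the new group under the listed parent groups.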
diff --git a/lib/ansible/modules/hostname.py b/lib/ansible/modules/hostname.py
new file mode 100644
index 0000000..f6284df
--- /dev/null
+++ b/lib/ansible/modules/hostname.py
@@ -0,0 +1,908 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hostname
+author:
+ - Adrian Likins (@alikins)
+ - Hideki Saito (@saito-hideki)
+version_added: "1.4"
+short_description: Manage hostname
+requirements: [ hostname ]
+description:
+ - Set system's hostname. Supports most OSs/Distributions including those using C(systemd).
+ - Windows, HP-UX, and AIX are not currently supported.
+notes:
+ - This module does B(NOT) modify C(/etc/hosts). You need to modify it yourself using other modules such as M(ansible.builtin.template)
+ or M(ansible.builtin.replace).
+ - On macOS, this module uses C(scutil) to set C(HostName), C(ComputerName), and C(LocalHostName). Since C(LocalHostName)
+ cannot contain spaces or most special characters, this module will replace characters when setting C(LocalHostName).
+options:
+ name:
+ description:
+ - Name of the host.
+ - If the value is a fully qualified domain name that does not resolve from the given host,
+ this will cause the module to hang for a few seconds while waiting for the name resolution attempt to time out.
+ type: str
+ required: true
+ use:
+ description:
+ - Which strategy to use to update the hostname.
+ - If not set, we try to autodetect, but this can be problematic, particularly with containers, as they can present misleading information.
+ - Note that 'systemd' should be specified for RHEL/EL/CentOS 7+. Older distributions should use 'redhat'.
+ choices: ['alpine', 'debian', 'freebsd', 'generic', 'macos', 'macosx', 'darwin', 'openbsd', 'openrc', 'redhat', 'sles', 'solaris', 'systemd']
+ type: str
+ version_added: '2.9'
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.facts
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ facts:
+ support: full
+ platform:
+ platforms: posix
+'''
+
+EXAMPLES = '''
+- name: Set a hostname
+ ansible.builtin.hostname:
+ name: web01
+
+- name: Set a hostname specifying strategy
+ ansible.builtin.hostname:
+ name: web01
+ use: systemd
+'''
+
+import os
+import platform
+import socket
+import traceback
+
+import ansible.module_utils.compat.typing as t
+
+from ansible.module_utils.basic import (
+ AnsibleModule,
+ get_distribution,
+ get_distribution_version,
+)
+from ansible.module_utils.common.sys_info import get_platform_subclass
+from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
+from ansible.module_utils.facts.utils import get_file_lines, get_file_content
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.six import PY3, text_type
+
+STRATS = {
+ 'alpine': 'Alpine',
+ 'debian': 'Systemd',
+ 'freebsd': 'FreeBSD',
+ 'generic': 'Base',
+ 'macos': 'Darwin',
+ 'macosx': 'Darwin',
+ 'darwin': 'Darwin',
+ 'openbsd': 'OpenBSD',
+ 'openrc': 'OpenRC',
+ 'redhat': 'RedHat',
+ 'sles': 'SLES',
+ 'solaris': 'Solaris',
+ 'systemd': 'Systemd',
+}
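+# Note: these values are class-name prefixes; e.g. use=redhat maps to
+# 'RedHat', which Hostname.__init__ below resolves to RedHatStrategy via
+# a globals() lookup on '%sStrategy'.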
+
+
+class BaseStrategy(object):
+ def __init__(self, module):
+ self.module = module
+ self.changed = False
+
+ def update_current_and_permanent_hostname(self):
+ self.update_current_hostname()
+ self.update_permanent_hostname()
+ return self.changed
+
+ def update_current_hostname(self):
+ name = self.module.params['name']
+ current_name = self.get_current_hostname()
+ if current_name != name:
+ if not self.module.check_mode:
+ self.set_current_hostname(name)
+ self.changed = True
+
+ def update_permanent_hostname(self):
+ name = self.module.params['name']
+ permanent_name = self.get_permanent_hostname()
+ if permanent_name != name:
+ if not self.module.check_mode:
+ self.set_permanent_hostname(name)
+ self.changed = True
+
+ def get_current_hostname(self):
+ return self.get_permanent_hostname()
+
+ def set_current_hostname(self, name):
+ pass
+
+ def get_permanent_hostname(self):
+ raise NotImplementedError
+
+ def set_permanent_hostname(self, name):
+ raise NotImplementedError
+
+
+class UnimplementedStrategy(BaseStrategy):
+ def update_current_and_permanent_hostname(self):
+ self.unimplemented_error()
+
+ def update_current_hostname(self):
+ self.unimplemented_error()
+
+ def update_permanent_hostname(self):
+ self.unimplemented_error()
+
+ def get_current_hostname(self):
+ self.unimplemented_error()
+
+ def set_current_hostname(self, name):
+ self.unimplemented_error()
+
+ def get_permanent_hostname(self):
+ self.unimplemented_error()
+
+ def set_permanent_hostname(self, name):
+ self.unimplemented_error()
+
+ def unimplemented_error(self):
+ system = platform.system()
+ distribution = get_distribution()
+ if distribution is not None:
+ msg_platform = '%s (%s)' % (system, distribution)
+ else:
+ msg_platform = system
+ self.module.fail_json(
+ msg='hostname module cannot be used on platform %s' % msg_platform)
+
+
+class CommandStrategy(BaseStrategy):
+ COMMAND = 'hostname'
+
+ def __init__(self, module):
+ super(CommandStrategy, self).__init__(module)
+ self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
+
+ def get_current_hostname(self):
+ cmd = [self.hostname_cmd]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_current_hostname(self, name):
+ cmd = [self.hostname_cmd, name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def get_permanent_hostname(self):
+ return 'UNKNOWN'
+
+ def set_permanent_hostname(self, name):
+ pass
+
+
+class FileStrategy(BaseStrategy):
+ FILE = '/etc/hostname'
+
+ def get_permanent_hostname(self):
+ if not os.path.isfile(self.FILE):
+ return ''
+
+ try:
+ return get_file_content(self.FILE, default='', strip=True)
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to read hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ with open(self.FILE, 'w+') as f:
+ f.write("%s\n" % name)
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to update hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
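+ # /etc/hostname simply holds the bare name plus a newline (e.g.
+ # 'web01\n'); get_file_content(strip=True) removes the trailing newline.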
+
+
+class SLESStrategy(FileStrategy):
+ """
+ This is a SLES Hostname strategy class - it edits the
+ /etc/HOSTNAME file.
+ """
+ FILE = '/etc/HOSTNAME'
+
+
+class RedHatStrategy(BaseStrategy):
+ """
+ This is a Redhat Hostname strategy class - it edits the
+ /etc/sysconfig/network file.
+ """
+ NETWORK_FILE = '/etc/sysconfig/network'
+
+ def get_permanent_hostname(self):
+ try:
+ for line in get_file_lines(self.NETWORK_FILE):
+ line = to_native(line).strip()
+ if line.startswith('HOSTNAME'):
+ k, v = line.split('=')
+ return v.strip()
+ self.module.fail_json(
+ msg="Unable to locate HOSTNAME entry in %s" % self.NETWORK_FILE
+ )
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to read hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ lines = []
+ found = False
+ content = get_file_content(self.NETWORK_FILE, strip=False) or ""
+ for line in content.splitlines(True):
+ line = to_native(line)
+ if line.strip().startswith('HOSTNAME'):
+ lines.append("HOSTNAME=%s\n" % name)
+ found = True
+ else:
+ lines.append(line)
+ if not found:
+ lines.append("HOSTNAME=%s\n" % name)
+ with open(self.NETWORK_FILE, 'w+') as f:
+ f.writelines(lines)
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to update hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
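+ # Sketch: a file containing 'HOSTNAME=old' among other lines has only
+ # that line rewritten to 'HOSTNAME=<name>'; if no HOSTNAME line exists,
+ # one is appended at the end.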
+
+
+class AlpineStrategy(FileStrategy):
+ """
+ This is an Alpine Linux hostname manipulation strategy class - it edits
+ the /etc/hostname file, then runs hostname -F /etc/hostname.
+ """
+
+ FILE = '/etc/hostname'
+ COMMAND = 'hostname'
+
+ def set_current_hostname(self, name):
+ super(AlpineStrategy, self).set_current_hostname(name)
+ hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
+
+ cmd = [hostname_cmd, '-F', self.FILE]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+
+class SystemdStrategy(BaseStrategy):
+ """
+ This is a Systemd hostname manipulation strategy class - it uses
+ the hostnamectl command.
+ """
+
+ COMMAND = "hostnamectl"
+
+ def __init__(self, module):
+ super(SystemdStrategy, self).__init__(module)
+ self.hostnamectl_cmd = self.module.get_bin_path(self.COMMAND, True)
+
+ def get_current_hostname(self):
+ cmd = [self.hostnamectl_cmd, '--transient', 'status']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_current_hostname(self, name):
+ if len(name) > 64:
+ self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
+ cmd = [self.hostnamectl_cmd, '--transient', 'set-hostname', name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def get_permanent_hostname(self):
+ cmd = [self.hostnamectl_cmd, '--static', 'status']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_permanent_hostname(self, name):
+ if len(name) > 64:
+ self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
+ cmd = [self.hostnamectl_cmd, '--pretty', '--static', 'set-hostname', name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def update_current_and_permanent_hostname(self):
+ # Must set the permanent hostname prior to current to avoid NetworkManager complaints
+ # about setting the hostname outside of NetworkManager
+ self.update_permanent_hostname()
+ self.update_current_hostname()
+ return self.changed
+
+
+class OpenRCStrategy(BaseStrategy):
+ """
+ This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
+ the /etc/conf.d/hostname file.
+ """
+
+ FILE = '/etc/conf.d/hostname'
+
+ def get_permanent_hostname(self):
+ if not os.path.isfile(self.FILE):
+ return ''
+
+ try:
+ for line in get_file_lines(self.FILE):
+ line = line.strip()
+ if line.startswith('hostname='):
+ return line[9:].strip('"')
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to read hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ lines = [x.strip() for x in get_file_lines(self.FILE)]
+
+ for i, line in enumerate(lines):
+ if line.startswith('hostname='):
+ lines[i] = 'hostname="%s"' % name
+ break
+
+ with open(self.FILE, 'w') as f:
+ f.write('\n'.join(lines) + '\n')
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to update hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+class OpenBSDStrategy(FileStrategy):
+ """
+ This is an OpenBSD family hostname manipulation strategy class - it edits
+ the /etc/myname file.
+ """
+
+ FILE = '/etc/myname'
+
+
+class SolarisStrategy(BaseStrategy):
+ """
+ This is a Solaris 11 or later hostname manipulation strategy class - it
+ executes the hostname command.
+ """
+
+ COMMAND = "hostname"
+
+ def __init__(self, module):
+ super(SolarisStrategy, self).__init__(module)
+ self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
+
+ def set_current_hostname(self, name):
+ cmd_option = '-t'
+ cmd = [self.hostname_cmd, cmd_option, name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def get_permanent_hostname(self):
+ fmri = 'svc:/system/identity:node'
+ pattern = 'config/nodename'
+ cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
+ rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_permanent_hostname(self, name):
+ cmd = [self.hostname_cmd, name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+
+class FreeBSDStrategy(BaseStrategy):
+ """
+ This is a FreeBSD hostname manipulation strategy class - it edits
+ the /etc/rc.conf.d/hostname file.
+ """
+
+ FILE = '/etc/rc.conf.d/hostname'
+ COMMAND = "hostname"
+
+ def __init__(self, module):
+ super(FreeBSDStrategy, self).__init__(module)
+ self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
+
+ def get_current_hostname(self):
+ cmd = [self.hostname_cmd]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_current_hostname(self, name):
+ cmd = [self.hostname_cmd, name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def get_permanent_hostname(self):
+ if not os.path.isfile(self.FILE):
+ return ''
+
+ try:
+ for line in get_file_lines(self.FILE):
+ line = line.strip()
+ if line.startswith('hostname='):
+ return line[9:].strip('"')
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to read hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ if os.path.isfile(self.FILE):
+ lines = [x.strip() for x in get_file_lines(self.FILE)]
+
+ for i, line in enumerate(lines):
+ if line.startswith('hostname='):
+ lines[i] = 'hostname="%s"' % name
+ break
+ else:
+ lines = ['hostname="%s"' % name]
+
+ with open(self.FILE, 'w') as f:
+ f.write('\n'.join(lines) + '\n')
+ except Exception as e:
+ self.module.fail_json(
+ msg="failed to update hostname: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+class DarwinStrategy(BaseStrategy):
+ """
+ This is a macOS hostname manipulation strategy class. It uses
+ /usr/sbin/scutil to set ComputerName, HostName, and LocalHostName.
+
+ HostName corresponds to what most platforms consider to be hostname.
+ It controls the name used on the command line and SSH.
+
+ However, macOS also has LocalHostName and ComputerName settings.
+ LocalHostName controls the Bonjour/ZeroConf name, used by services
+ like AirDrop. This class implements a method, _scrub_hostname(), that mimics
+ the transformations macOS makes on hostnames when entered in the Sharing
+ preference pane. It replaces spaces with dashes and removes all special
+ characters.
+
+ ComputerName is the name used for user-facing GUI services, like the
+ System Preferences/Sharing pane and when users connect to the Mac over the network.
+ """
+
+ def __init__(self, module):
+ super(DarwinStrategy, self).__init__(module)
+
+ self.scutil = self.module.get_bin_path('scutil', True)
+ self.name_types = ('HostName', 'ComputerName', 'LocalHostName')
+ self.scrubbed_name = self._scrub_hostname(self.module.params['name'])
+
+ def _make_translation(self, replace_chars, replacement_chars, delete_chars):
+ if PY3:
+ return str.maketrans(replace_chars, replacement_chars, delete_chars)
+
+ if not isinstance(replace_chars, text_type) or not isinstance(replacement_chars, text_type):
+ raise ValueError('replace_chars and replacement_chars must both be strings')
+ if len(replace_chars) != len(replacement_chars):
+ raise ValueError('replacement_chars must be the same length as replace_chars')
+
+ table = dict(zip((ord(c) for c in replace_chars), replacement_chars))
+ for char in delete_chars:
+ table[ord(char)] = None
+
+ return table
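+ # For example, _make_translation(u'ab', u'--', u'.') yields a table that
+ # maps both 'a' and 'b' to '-' and deletes '.', on PY2 and PY3 alike.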
+
+ def _scrub_hostname(self, name):
+ """
+ LocalHostName only accepts valid DNS characters while HostName and ComputerName
+ accept a much wider range of characters. This function aims to mimic how macOS
+ translates a friendly name to the LocalHostName.
+ """
+
+ # Replace all these characters with a single dash
+ name = to_text(name)
+ replace_chars = u'\'"~`!@#$%^&*(){}[]/=?+\\|-_ '
+ delete_chars = u".'"
+ table = self._make_translation(replace_chars, u'-' * len(replace_chars), delete_chars)
+ name = name.translate(table)
+
+ # Replace multiple dashes with a single dash
+ while '-' * 2 in name:
+ name = name.replace('-' * 2, '-')
+
+ name = name.rstrip('-')
+ return name
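+ # Illustrative trace: _scrub_hostname(u"Jo's MacBook Pro!") returns
+ # u'Jos-MacBook-Pro': the apostrophe is deleted, space and '!' become
+ # dashes, dash runs collapse, and trailing dashes are stripped.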
+
+ def get_current_hostname(self):
+ cmd = [self.scutil, '--get', 'HostName']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0 and 'HostName: not set' not in err:
+ self.module.fail_json(msg="Failed to get current hostname rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ return to_native(out).strip()
+
+ def get_permanent_hostname(self):
+ cmd = [self.scutil, '--get', 'ComputerName']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to get permanent hostname rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ return to_native(out).strip()
+
+ def set_permanent_hostname(self, name):
+ for hostname_type in self.name_types:
+ cmd = [self.scutil, '--set', hostname_type]
+ if hostname_type == 'LocalHostName':
+ cmd.append(to_native(self.scrubbed_name))
+ else:
+ cmd.append(to_native(name))
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to set {3} to '{2}': {0} {1}".format(to_native(out), to_native(err), to_native(name), hostname_type))
+
+ def set_current_hostname(self, name):
+ pass
+
+ def update_current_hostname(self):
+ pass
+
+ def update_permanent_hostname(self):
+ name = self.module.params['name']
+
+ # Get all the current host name values in the order of self.name_types
+ all_names = tuple(self.module.run_command([self.scutil, '--get', name_type])[1].strip() for name_type in self.name_types)
+
+ # Get the expected host name values based on the order in self.name_types
+ expected_names = tuple(self.scrubbed_name if n == 'LocalHostName' else name for n in self.name_types)
+
+ # Ensure all three names are updated
+ if all_names != expected_names:
+ if not self.module.check_mode:
+ self.set_permanent_hostname(name)
+ self.changed = True
+
+
+class Hostname(object):
+ """
+ This is a generic Hostname manipulation class that is subclassed
+ based on platform.
+
+ A subclass may wish to assign a different strategy instance to self.strategy.
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None # type: str | None
+ strategy_class = UnimplementedStrategy # type: t.Type[BaseStrategy]
+
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(Hostname)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.use = module.params['use']
+
+ if self.use is not None:
+ strat = globals()['%sStrategy' % STRATS[self.use]]
+ self.strategy = strat(module)
+ elif platform.system() == 'Linux' and ServiceMgrFactCollector.is_systemd_managed(module):
+ # This is Linux and systemd is active
+ self.strategy = SystemdStrategy(module)
+ else:
+ self.strategy = self.strategy_class(module)
+
+ def update_current_and_permanent_hostname(self):
+ return self.strategy.update_current_and_permanent_hostname()
+
+ def get_current_hostname(self):
+ return self.strategy.get_current_hostname()
+
+ def set_current_hostname(self, name):
+ self.strategy.set_current_hostname(name)
+
+ def get_permanent_hostname(self):
+ return self.strategy.get_permanent_hostname()
+
+ def set_permanent_hostname(self, name):
+ self.strategy.set_permanent_hostname(name)
+
+
+class SLESHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Sles'
+ try:
+ distribution_version = get_distribution_version()
+ # casting to float may raise ValueError on non-SLES systems; we use float for a little more safety over int
+ if distribution_version and 10 <= float(distribution_version) <= 12:
+ strategy_class = SLESStrategy # type: t.Type[BaseStrategy]
+ else:
+ raise ValueError()
+ except ValueError:
+ strategy_class = UnimplementedStrategy
+
+
+class RHELHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Redhat'
+ strategy_class = RedHatStrategy
+
+
+class CentOSHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Centos'
+ strategy_class = RedHatStrategy
+
+
+class AnolisOSHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Anolis'
+ strategy_class = RedHatStrategy
+
+
+class CloudlinuxserverHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Cloudlinuxserver'
+ strategy_class = RedHatStrategy
+
+
+class CloudlinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Cloudlinux'
+ strategy_class = RedHatStrategy
+
+
+class AlinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Alinux'
+ strategy_class = RedHatStrategy
+
+
+class ScientificHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Scientific'
+ strategy_class = RedHatStrategy
+
+
+class OracleLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Oracle'
+ strategy_class = RedHatStrategy
+
+
+class VirtuozzoLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Virtuozzo'
+ strategy_class = RedHatStrategy
+
+
+class AmazonLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Amazon'
+ strategy_class = RedHatStrategy
+
+
+class DebianHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Debian'
+ strategy_class = FileStrategy
+
+
+class KylinHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Kylin'
+ strategy_class = FileStrategy
+
+
+class CumulusHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Cumulus-linux'
+ strategy_class = FileStrategy
+
+
+class KaliHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Kali'
+ strategy_class = FileStrategy
+
+
+class ParrotHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Parrot'
+ strategy_class = FileStrategy
+
+
+class UbuntuHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Ubuntu'
+ strategy_class = FileStrategy
+
+
+class LinuxmintHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Linuxmint'
+ strategy_class = FileStrategy
+
+
+class LinaroHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Linaro'
+ strategy_class = FileStrategy
+
+
+class DevuanHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Devuan'
+ strategy_class = FileStrategy
+
+
+class RaspbianHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Raspbian'
+ strategy_class = FileStrategy
+
+
+class UosHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Uos'
+ strategy_class = FileStrategy
+
+
+class DeepinHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Deepin'
+ strategy_class = FileStrategy
+
+
+class GentooHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Gentoo'
+ strategy_class = OpenRCStrategy
+
+
+class ALTLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Altlinux'
+ strategy_class = RedHatStrategy
+
+
+class AlpineLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Alpine'
+ strategy_class = AlpineStrategy
+
+
+class OpenBSDHostname(Hostname):
+ platform = 'OpenBSD'
+ distribution = None
+ strategy_class = OpenBSDStrategy
+
+
+class SolarisHostname(Hostname):
+ platform = 'SunOS'
+ distribution = None
+ strategy_class = SolarisStrategy
+
+
+class FreeBSDHostname(Hostname):
+ platform = 'FreeBSD'
+ distribution = None
+ strategy_class = FreeBSDStrategy
+
+
+class NetBSDHostname(Hostname):
+ platform = 'NetBSD'
+ distribution = None
+ strategy_class = FreeBSDStrategy
+
+
+class NeonHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Neon'
+ strategy_class = FileStrategy
+
+
+class DarwinHostname(Hostname):
+ platform = 'Darwin'
+ distribution = None
+ strategy_class = DarwinStrategy
+
+
+class VoidLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Void'
+ strategy_class = FileStrategy
+
+
+class PopHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Pop'
+ strategy_class = FileStrategy
+
+
+class EurolinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Eurolinux'
+ strategy_class = RedHatStrategy
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ use=dict(type='str', choices=list(STRATS.keys()))
+ ),
+ supports_check_mode=True,
+ )
+
+ hostname = Hostname(module)
+ name = module.params['name']
+
+ current_hostname = hostname.get_current_hostname()
+ permanent_hostname = hostname.get_permanent_hostname()
+
+ changed = hostname.update_current_and_permanent_hostname()
+
+ if name != current_hostname:
+ name_before = current_hostname
+ else:
+ name_before = permanent_hostname
+
+ # NOTE: socket.getfqdn() calls gethostbyaddr(socket.gethostname()), which can be
+ # slow to return if the name does not resolve correctly.
+ kw = dict(changed=changed, name=name,
+ ansible_facts=dict(ansible_hostname=name.split('.')[0],
+ ansible_nodename=name,
+ ansible_fqdn=socket.getfqdn(),
+ ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
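+ # Sketch: name='web01.example.com' yields ansible_hostname='web01';
+ # ansible_fqdn and ansible_domain come from socket.getfqdn(), e.g.
+ # 'web01.example.com' and 'example.com' when resolution succeeds.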
+
+ if changed:
+ kw['diff'] = {'after': 'hostname = ' + name + '\n',
+ 'before': 'hostname = ' + name_before + '\n'}
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/import_playbook.py b/lib/ansible/modules/import_playbook.py
new file mode 100644
index 0000000..9adaebf
--- /dev/null
+++ b/lib/ansible/modules/import_playbook.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: import_playbook
+short_description: Import a playbook
+description:
+ - Includes a file with a list of plays to be executed.
+ - Files with a list of plays can only be included at the top level.
+ - You cannot use this action inside a play.
+version_added: "2.4"
+options:
+ free-form:
+ description:
+ - The name of the imported playbook is specified directly without any other option.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+ - action_core.import
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+notes:
+ - This is a core feature of Ansible, rather than a module, and cannot be overridden like a module.
+seealso:
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: localhost
+ tasks:
+ - ansible.builtin.debug:
+ msg: play1
+
+- name: Include a play after another play
+ ansible.builtin.import_playbook: otherplays.yaml
+
+- name: Set variables on an imported playbook
+ ansible.builtin.import_playbook: otherplays.yml
+ vars:
+ service: httpd
+
+- name: Include a playbook from a collection
+ ansible.builtin.import_playbook: my_namespace.my_collection.my_playbook
+
+- name: This DOES NOT WORK
+ hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: This fails because I'm inside a play already
+ ansible.builtin.import_playbook: stuff.yaml
+'''
+
+RETURN = r'''
+# This module does not return anything except plays to execute.
+'''
diff --git a/lib/ansible/modules/import_role.py b/lib/ansible/modules/import_role.py
new file mode 100644
index 0000000..2f118f2
--- /dev/null
+++ b/lib/ansible/modules/import_role.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: import_role
+short_description: Import a role into a play
+description:
+ - Much like the C(roles:) keyword, this task loads a role, but it allows you to control when the role tasks run in
+ between other tasks of the play.
+ - Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If
+ you want the opposite behavior, use M(ansible.builtin.include_role) instead.
+ - Does not work in handlers.
+version_added: '2.4'
+options:
+ name:
+ description:
+ - The name of the role to be executed.
+ type: str
+ required: true
+ tasks_from:
+ description:
+ - File to load from a role's C(tasks/) directory.
+ type: str
+ default: main
+ vars_from:
+ description:
+ - File to load from a role's C(vars/) directory.
+ type: str
+ default: main
+ defaults_from:
+ description:
+ - File to load from a role's C(defaults/) directory.
+ type: str
+ default: main
+ allow_duplicates:
+ description:
+ - Overrides the role's metadata setting to allow using a role more than once with the same parameters.
+ type: bool
+ default: yes
+ handlers_from:
+ description:
+ - File to load from a role's C(handlers/) directory.
+ type: str
+ default: main
+ version_added: '2.8'
+ rolespec_validate:
+ description:
+ - Perform role argument spec validation if an argument spec is defined.
+ type: bool
+ default: yes
+ version_added: '2.11'
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+ - action_core.import
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+notes:
+ - Handlers are made available to the whole play.
+ - Since Ansible 2.7 variables defined in C(vars) and C(defaults) for the role are exposed to the play at playbook parsing time.
+ Due to this, these variables will be accessible to roles and tasks executed before the location of the
+ M(ansible.builtin.import_role) task.
+ - Unlike M(ansible.builtin.include_role) variable exposure is not configurable, and will always be exposed.
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: all
+ tasks:
+ - ansible.builtin.import_role:
+ name: myrole
+
+ - name: Run tasks/other.yaml instead of 'main'
+ ansible.builtin.import_role:
+ name: myrole
+ tasks_from: other
+
+ - name: Pass variables to role
+ ansible.builtin.import_role:
+ name: myrole
+ vars:
+ rolevar1: value from task
+
+ - name: Apply condition to each task in role
+ ansible.builtin.import_role:
+ name: myrole
+ when: not idontwanttorun
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/import_tasks.py b/lib/ansible/modules/import_tasks.py
new file mode 100644
index 0000000..e578620
--- /dev/null
+++ b/lib/ansible/modules/import_tasks.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: import_tasks
+short_description: Import a task list
+description:
+ - Imports a list of tasks to be added to the current playbook for subsequent execution.
+version_added: "2.4"
+options:
+ free-form:
+ description:
+ - |
+ Specifies the name of the imported file directly without any other option C(- import_tasks: file.yml).
+ - Most keywords, including loops and conditionals, only apply to the imported tasks, not to this statement itself.
+ - If you need any of those to apply, use M(ansible.builtin.include_tasks) instead.
+ file:
+ description:
+ - Specifies the name of the file that lists tasks to add to the current playbook.
+ type: str
+ version_added: '2.7'
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+ - action_core.import
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+notes:
+ - This is a core feature of Ansible, rather than a module, and cannot be overridden like a module
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Include task list in play
+ ansible.builtin.import_tasks:
+ file: stuff.yaml
+
+ - ansible.builtin.debug:
+ msg: task10
+
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Apply conditional to all imported tasks
+ ansible.builtin.import_tasks: stuff.yaml
+ when: hostvar is defined
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/include_role.py b/lib/ansible/modules/include_role.py
new file mode 100644
index 0000000..ea7c61e
--- /dev/null
+++ b/lib/ansible/modules/include_role.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: include_role
+short_description: Load and execute a role
+description:
+ - Dynamically loads and executes a specified role as a task.
+ - May be used only where Ansible tasks are allowed - inside C(pre_tasks), C(tasks), or C(post_tasks) play objects, or as a task inside a role.
+ - Task-level keywords, loops, and conditionals apply only to the C(include_role) statement itself.
+ - To apply keywords to the tasks within the role, pass them using the C(apply) option or use M(ansible.builtin.import_role) instead.
+ - Ignores some keywords, like C(until) and C(retries).
+ - This module is also supported for Windows targets.
+ - Does not work in handlers.
+version_added: "2.2"
+options:
+ apply:
+ description:
+ - Accepts a hash of task keywords (e.g. C(tags), C(become)) that will be applied to all tasks within the included role.
+ version_added: '2.7'
+ name:
+ description:
+ - The name of the role to be executed.
+ type: str
+ required: True
+ tasks_from:
+ description:
+ - File to load from a role's C(tasks/) directory.
+ type: str
+ default: main
+ vars_from:
+ description:
+ - File to load from a role's C(vars/) directory.
+ type: str
+ default: main
+ defaults_from:
+ description:
+ - File to load from a role's C(defaults/) directory.
+ type: str
+ default: main
+ allow_duplicates:
+ description:
+ - Overrides the role's metadata setting to allow using a role more than once with the same parameters.
+ type: bool
+ default: yes
+ public:
+ description:
+ - This option dictates whether the role's C(vars) and C(defaults) are exposed to the play. If set to C(true)
+ the variables will be available to tasks following the C(include_role) task. This functionality differs from
+ standard variable exposure for roles listed under the C(roles) header or C(import_role) as they are exposed
+ to the play at playbook parsing time, and available to earlier roles and tasks as well.
+ type: bool
+ default: no
+ version_added: '2.7'
+ handlers_from:
+ description:
+ - File to load from a role's C(handlers/) directory.
+ type: str
+ default: main
+ version_added: '2.8'
+ rolespec_validate:
+ description:
+ - Perform role argument spec validation if an argument spec is defined.
+ type: bool
+ default: yes
+ version_added: '2.11'
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+ - action_core.include
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+notes:
+ - Handlers are made available to the whole play.
+ - After Ansible 2.4, you can use M(ansible.builtin.import_role) for C(static) behaviour and this action for the C(dynamic) one.
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- ansible.builtin.include_role:
+ name: myrole
+
+- name: Run tasks/other.yaml instead of 'main'
+ ansible.builtin.include_role:
+ name: myrole
+ tasks_from: other
+
+- name: Pass variables to role
+ ansible.builtin.include_role:
+ name: myrole
+ vars:
+ rolevar1: value from task
+
+- name: Use role in loop
+ ansible.builtin.include_role:
+ name: '{{ roleinputvar }}'
+ loop:
+ - '{{ roleinput1 }}'
+ - '{{ roleinput2 }}'
+ loop_control:
+ loop_var: roleinputvar
+
+- name: Conditional role
+ ansible.builtin.include_role:
+ name: myrole
+ when: not idontwanttorun
+
+- name: Apply tags to tasks within included file
+ ansible.builtin.include_role:
+ name: install
+ apply:
+ tags:
+ - install
+ tags:
+ - always
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/include_tasks.py b/lib/ansible/modules/include_tasks.py
new file mode 100644
index 0000000..ff5d62a
--- /dev/null
+++ b/lib/ansible/modules/include_tasks.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: include_tasks
+short_description: Dynamically include a task list
+description:
+ - Includes a file with a list of tasks to be executed in the current playbook.
+version_added: '2.4'
+options:
+ file:
+ description:
+ - Specifies the name of the file that lists tasks to add to the current playbook.
+ type: str
+ version_added: '2.7'
+ apply:
+ description:
+ - Accepts a hash of task keywords (e.g. C(tags), C(become)) that will be applied to the tasks within the include.
+ type: str
+ version_added: '2.7'
+ free-form:
+ description:
+ - |
+ Specifies the name of the included file directly without any other option C(- include_tasks: file.yml).
+ - Is the equivalent of specifying an argument for the I(file) parameter.
+ - Most keywords, including loop, with_items, and conditionals, apply to this statement, unlike M(ansible.builtin.import_tasks).
+ - The do-until loop is not supported.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+ - action_core.include
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Include task list in play
+ ansible.builtin.include_tasks:
+ file: stuff.yaml
+
+ - ansible.builtin.debug:
+ msg: task10
+
+- hosts: all
+ tasks:
+ - ansible.builtin.debug:
+ msg: task1
+
+ - name: Include task list in play only if the condition is true
+ ansible.builtin.include_tasks: "{{ hostvar }}.yaml"
+ when: hostvar is defined
+
+- name: Apply tags to tasks within included file
+ ansible.builtin.include_tasks:
+ file: install.yml
+ apply:
+ tags:
+ - install
+ tags:
+ - always
+
+- name: Apply tags to tasks within included file when using free-form
+ ansible.builtin.include_tasks: install.yml
+ args:
+ apply:
+ tags:
+ - install
+ tags:
+ - always
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/include_vars.py b/lib/ansible/modules/include_vars.py
new file mode 100644
index 0000000..f0aad94
--- /dev/null
+++ b/lib/ansible/modules/include_vars.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Allen Sanabria (@linuxdynasty)
+module: include_vars
+short_description: Load variables from files, dynamically within a task
+description:
+ - Loads YAML/JSON variables dynamically from a file or directory, recursively, during task runtime.
+ - If loading a directory, the files are sorted alphabetically before being loaded.
+ - This module is also supported for Windows targets.
+ - To assign included variables to a different host than C(inventory_hostname),
+ use C(delegate_to) and set C(delegate_facts=yes).
+version_added: "1.4"
+options:
+ file:
+ description:
+ - The file name from which variables should be loaded.
+ - If the path is relative, it will look for the file in the vars/ subdirectory of a role or relative to the playbook.
+ type: path
+ version_added: "2.2"
+ dir:
+ description:
+ - The directory name from which the variables should be loaded.
+ - If the path is relative and the task is inside a role, it will look inside the role's vars/ subdirectory.
+ - If the path is relative and not inside a role, it will be parsed relative to the playbook.
+ type: path
+ version_added: "2.2"
+ name:
+ description:
+ - The name of a variable into which the included vars will be assigned.
+ - If omitted (null), they will be made top-level vars.
+ type: str
+ version_added: "2.2"
+ depth:
+ description:
+ - When using C(dir), this module will, by default, recursively go through each subdirectory and load
+ the variables. Setting the depth explicitly limits recursion to that many levels.
+ type: int
+ default: 0
+ version_added: "2.2"
+ files_matching:
+ description:
+ - Limit the files that are loaded within any directory to this regular expression.
+ type: str
+ version_added: "2.2"
+ ignore_files:
+ description:
+ - List of file names to ignore.
+ type: list
+ elements: str
+ version_added: "2.2"
+ extensions:
+ description:
+ - List of file extensions to read when using C(dir).
+ type: list
+ elements: str
+ default: [ json, yaml, yml ]
+ version_added: "2.3"
+ ignore_unknown_extensions:
+ description:
+ - Ignore unknown file extensions within the directory.
+ - This allows users to specify a directory containing vars files that are intermingled with files of
+ other extension types (e.g. a directory with a README in it and vars files).
+ type: bool
+ default: no
+ version_added: "2.7"
+ hash_behaviour:
+ description:
+ - If set to C(merge), merges existing hash variables instead of overwriting them.
+ - If omitted (C(null)), the behavior falls back to the global I(hash_behaviour) configuration.
+ default: null
+ type: str
+ choices: ["replace", "merge"]
+ version_added: "2.12"
+ free-form:
+ description:
+ - This module allows you to specify the 'file' option directly without any other options.
+ - There is no 'free-form' option, this is just an indicator, see example below.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+attributes:
+ action:
+ details: While the action plugin does some of the work, it relies on the core engine to actually create the variables; that part cannot be overridden
+ support: partial
+ bypass_host_loop:
+ support: none
+ bypass_task_loop:
+ support: none
+ check_mode:
+ support: full
+ delegation:
+ details:
+ - while variable assignment can be delegated to a different host the execution context is always the current inventory_hostname
+ - connection variables, if set at all, would reflect the host it would target, even if we are not connecting at all in this case
+ support: partial
+ diff_mode:
+ support: none
+ core:
+ details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
+ support: partial
+seealso:
+- module: ansible.builtin.set_fact
+- ref: playbooks_delegation
+ description: More information related to task delegation.
+'''
+
+EXAMPLES = r'''
+- name: Include vars of stuff.yaml into the 'stuff' variable (2.2).
+ ansible.builtin.include_vars:
+ file: stuff.yaml
+ name: stuff
+
+- name: Conditionally decide to load in variables into 'plans' when x is 0, otherwise do not. (2.2)
+ ansible.builtin.include_vars:
+ file: contingency_plan.yaml
+ name: plans
+ when: x == 0
+
+- name: Load a variable file based on the OS type, or a default if not found. Using free-form to specify the file.
+ ansible.builtin.include_vars: "{{ lookup('ansible.builtin.first_found', params) }}"
+ vars:
+ params:
+ files:
+ - '{{ansible_distribution}}.yaml'
+ - '{{ansible_os_family}}.yaml'
+ - default.yaml
+ paths:
+ - 'vars'
+
+- name: Bare include (free-form)
+ ansible.builtin.include_vars: myvars.yaml
+
+- name: Include all .json and .jsn files in vars/all and all nested directories (2.3)
+ ansible.builtin.include_vars:
+ dir: vars/all
+ extensions:
+ - 'json'
+ - 'jsn'
+
+- name: Include all default extension files in vars/all and all nested directories and save the output in test. (2.2)
+ ansible.builtin.include_vars:
+ dir: vars/all
+ name: test
+
+- name: Include default extension files in vars/services (2.2)
+ ansible.builtin.include_vars:
+ dir: vars/services
+ depth: 1
+
+- name: Include only files matching bastion.yaml (2.2)
+ ansible.builtin.include_vars:
+ dir: vars
+ files_matching: bastion.yaml
+
+- name: Include all .yaml files except bastion.yaml (2.3)
+ ansible.builtin.include_vars:
+ dir: vars
+ ignore_files:
+ - 'bastion.yaml'
+ extensions:
+ - 'yaml'
+
+- name: Ignore warnings raised for files with unknown extensions while loading (2.7)
+ ansible.builtin.include_vars:
+ dir: vars
+ ignore_unknown_extensions: True
+ extensions:
+ - ''
+ - 'yaml'
+ - 'yml'
+ - 'json'
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Variables that were included and their values
+ returned: success
+ type: dict
+ sample: {'variable': 'value'}
+ansible_included_var_files:
+ description: A list of files that were successfully included
+ returned: success
+ type: list
+ sample: [ /path/to/file.json, /path/to/file.yaml ]
+ version_added: '2.4'
+'''
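+# Illustrative note: with name=stuff, the included variables are returned
+# under ansible_facts nested as {'stuff': {...}} rather than as top-level
+# vars.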
diff --git a/lib/ansible/modules/iptables.py b/lib/ansible/modules/iptables.py
new file mode 100644
index 0000000..f4dba73
--- /dev/null
+++ b/lib/ansible/modules/iptables.py
@@ -0,0 +1,916 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
+# Copyright: (c) 2017, Sébastien DA ROCHA <sebastien@da-rocha.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iptables
+short_description: Modify iptables rules
+version_added: "2.0"
+author:
+- Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
+- Sébastien DA ROCHA (@sebastiendarocha)
+description:
+ - C(iptables) is used to set up, maintain, and inspect the tables of IP packet
+ filter rules in the Linux kernel.
+ - This module does not handle the saving and/or loading of rules, but rather
+ only manipulates the current rules that are present in memory. This is the
+ same as the behaviour of the C(iptables) and C(ip6tables) command which
+ this module uses internally.
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: linux
+notes:
+ - This module just deals with individual rules. If you need advanced
+ chaining of rules, the recommended way is to template the iptables restore
+ file.
+options:
+ table:
+ description:
+ - This option specifies the packet matching table which the command should operate on.
+ - If the kernel is configured with automatic module loading, an attempt will be made
+ to load the appropriate module for that table if it is not already there.
+ type: str
+ choices: [ filter, nat, mangle, raw, security ]
+ default: filter
+ state:
+ description:
+ - Whether the rule should be absent or present.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ action:
+ description:
+ - Whether the rule should be appended at the bottom or inserted at the top.
+ - If the rule already exists the chain will not be modified.
+ type: str
+ choices: [ append, insert ]
+ default: append
+ version_added: "2.2"
+ rule_num:
+ description:
+ - Insert the rule as the given rule number.
+ - This works only with C(action=insert).
+ type: str
+ version_added: "2.5"
+ ip_version:
+ description:
+ - Which version of the IP protocol this rule should apply to.
+ type: str
+ choices: [ ipv4, ipv6 ]
+ default: ipv4
+ chain:
+ description:
+ - Specify the iptables chain to modify.
+ - This could be a user-defined chain or one of the standard iptables chains, like
+ C(INPUT), C(FORWARD), C(OUTPUT), C(PREROUTING), C(POSTROUTING), C(SECMARK) or C(CONNSECMARK).
+ type: str
+ protocol:
+ description:
+ - The protocol of the rule or of the packet to check.
+ - The specified protocol can be one of C(tcp), C(udp), C(udplite), C(icmp), C(ipv6-icmp) or C(icmpv6),
+ C(esp), C(ah), C(sctp) or the special keyword C(all), or it can be a numeric value,
+ representing one of these protocols or a different one.
+ - A protocol name from I(/etc/protocols) is also allowed.
+ - A C(!) argument before the protocol inverts the test.
+ - The number zero is equivalent to all.
+ - C(all) will match with all protocols and is taken as default when this option is omitted.
+ type: str
+ source:
+ description:
+ - Source specification.
+ - Address can be either a network name, a hostname, a network IP address
+ (with /mask), or a plain IP address.
+ - Hostnames will be resolved once only, before the rule is submitted to
+ the kernel. Please note that specifying any name to be resolved with
+ a remote query such as DNS is a really bad idea.
+ - The mask can be either a network mask or a plain number, specifying
+ the number of 1's at the left side of the network mask. Thus, a mask
+ of 24 is equivalent to 255.255.255.0. A C(!) argument before the
+ address specification inverts the sense of the address.
+ type: str
+ destination:
+ description:
+ - Destination specification.
+ - Address can be either a network name, a hostname, a network IP address
+ (with /mask), or a plain IP address.
+ - Hostnames will be resolved once only, before the rule is submitted to
+ the kernel. Please note that specifying any name to be resolved with
+ a remote query such as DNS is a really bad idea.
+ - The mask can be either a network mask or a plain number, specifying
+ the number of 1's at the left side of the network mask. Thus, a mask
+ of 24 is equivalent to 255.255.255.0. A C(!) argument before the
+ address specification inverts the sense of the address.
+ type: str
+ tcp_flags:
+ description:
+ - TCP flags specification.
+ - C(tcp_flags) expects a dict with the two keys C(flags) and C(flags_set).
+ type: dict
+ default: {}
+ version_added: "2.4"
+ suboptions:
+ flags:
+ description:
+ - List of flags you want to examine.
+ type: list
+ elements: str
+ flags_set:
+ description:
+ - Flags to be set.
+ type: list
+ elements: str
+ match:
+ description:
+ - Specifies a match to use, that is, an extension module that tests for
+ a specific property.
+ - The set of matches make up the condition under which a target is invoked.
+ - Matches are evaluated first to last if specified as an array and work in short-circuit
+ fashion, i.e. if one extension yields false, evaluation will stop.
+ type: list
+ elements: str
+ default: []
+ jump:
+ description:
+ - This specifies the target of the rule; i.e., what to do if the packet matches it.
+ - The target can be a user-defined chain (other than the one
+ this rule is in), one of the special builtin targets which decide the
+ fate of the packet immediately, or an extension (see EXTENSIONS in
+ the iptables(8) man page).
+ - If this option is omitted in a rule (and the goto parameter
+ is not used), then matching the rule will have no effect on the
+ packet's fate, but the counters on the rule will be incremented.
+ type: str
+ gateway:
+ description:
+ - This specifies the IP address of the host to send the cloned packets to.
+ - This option is only valid when C(jump) is set to C(TEE).
+ type: str
+ version_added: "2.8"
+ log_prefix:
+ description:
+ - Specifies a log text for the rule. Only makes sense with a LOG jump.
+ type: str
+ version_added: "2.5"
+ log_level:
+ description:
+ - Logging level according to the syslogd-defined priorities.
+ - The value can be a string or a number from C(0) to C(7).
+ - This parameter is only applicable if C(jump) is set to C(LOG).
+ type: str
+ version_added: "2.8"
+ choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
+ goto:
+ description:
+ - This specifies that the processing should continue in a user specified chain.
+ - Unlike the jump argument, return will not continue processing in
+ this chain but instead in the chain that called us via jump.
+ type: str
+ in_interface:
+ description:
+ - Name of an interface via which a packet was received (only for packets
+ entering the C(INPUT), C(FORWARD) and C(PREROUTING) chains).
+ - When the C(!) argument is used before the interface name, the sense is inverted.
+ - If the interface name ends in a C(+), then any interface which begins with
+ this name will match.
+ - If this option is omitted, any interface name will match.
+ type: str
+ out_interface:
+ description:
+ - Name of an interface via which a packet is going to be sent (for
+ packets entering the C(FORWARD), C(OUTPUT) and C(POSTROUTING) chains).
+ - When the C(!) argument is used before the interface name, the sense is inverted.
+ - If the interface name ends in a C(+), then any interface which begins
+ with this name will match.
+ - If this option is omitted, any interface name will match.
+ type: str
+ fragment:
+ description:
+ - This means that the rule only refers to second and further fragments
+ of fragmented packets.
+ - Since there is no way to tell the source or destination ports of such
+ a packet (or ICMP type), such a packet will not match any rules which specify them.
+ - When the "!" argument precedes fragment argument, the rule will only match head fragments,
+ or unfragmented packets.
+ type: str
+ set_counters:
+ description:
+ - This enables the administrator to initialize the packet and byte
+ counters of a rule (during C(INSERT), C(APPEND), C(REPLACE) operations).
+ type: str
+ source_port:
+ description:
+ - Source port or port range specification.
+ - This can either be a service name or a port number.
+ - An inclusive range can also be specified, using the format C(first:last).
+ - If the first port is omitted, C(0) is assumed; if the last is omitted, C(65535) is assumed.
+ - If the first port is greater than the second one they will be swapped.
+ type: str
+ destination_port:
+ description:
+ - "Destination port or port range specification. This can either be
+ a service name or a port number. An inclusive range can also be
+ specified, using the format first:last. If the first port is omitted,
+ '0' is assumed; if the last is omitted, '65535' is assumed. If the
+ first port is greater than the second one they will be swapped.
+ This is only valid if the rule also specifies one of the following
+ protocols: tcp, udp, dccp or sctp."
+ type: str
+ destination_ports:
+ description:
+ - This specifies multiple destination port numbers or port ranges to match in the multiport module.
+ - It can only be used in conjunction with the protocols tcp, udp, udplite, dccp and sctp.
+ type: list
+ elements: str
+ version_added: "2.11"
+ to_ports:
+ description:
+ - This specifies a destination port or range of ports to use, without
+ this, the destination port is never altered.
+ - This is only valid if the rule also specifies one of the protocols
+ C(tcp), C(udp), C(dccp) or C(sctp).
+ type: str
+ to_destination:
+ description:
+ - This specifies a destination address to use with C(DNAT).
+ - Without this, the destination address is never altered.
+ type: str
+ version_added: "2.1"
+ to_source:
+ description:
+ - This specifies a source address to use with C(SNAT).
+ - Without this, the source address is never altered.
+ type: str
+ version_added: "2.2"
+ syn:
+ description:
+ - This allows matching packets that have the SYN bit set and the ACK
+ and RST bits unset.
+ - When negated, this matches all packets with the RST or the ACK bits set.
+ type: str
+ choices: [ ignore, match, negate ]
+ default: ignore
+ version_added: "2.5"
+ set_dscp_mark:
+ description:
+ - This allows specifying a DSCP mark to be added to packets.
+ It takes either an integer or hex value.
+ - Mutually exclusive with C(set_dscp_mark_class).
+ type: str
+ version_added: "2.1"
+ set_dscp_mark_class:
+ description:
+ - This allows specifying a predefined DiffServ class which will be
+ translated to the corresponding DSCP mark.
+ - Mutually exclusive with C(set_dscp_mark).
+ type: str
+ version_added: "2.1"
+ comment:
+ description:
+ - This specifies a comment that will be added to the rule.
+ type: str
+ ctstate:
+ description:
+ - A list of the connection states to match in the conntrack module.
+ - Possible values are C(INVALID), C(NEW), C(ESTABLISHED), C(RELATED), C(UNTRACKED), C(SNAT), C(DNAT).
+ type: list
+ elements: str
+ default: []
+ src_range:
+ description:
+ - Specifies the source IP range to match in the iprange module.
+ type: str
+ version_added: "2.8"
+ dst_range:
+ description:
+ - Specifies the destination IP range to match in the iprange module.
+ type: str
+ version_added: "2.8"
+ match_set:
+ description:
+ - Specifies a set name which can be defined by ipset.
+ - Must be used together with the match_set_flags parameter.
+ - When the C(!) argument is prepended then it inverts the rule.
+ - Uses the iptables set extension.
+ type: str
+ version_added: "2.11"
+ match_set_flags:
+ description:
+ - Specifies the necessary flags for the match_set parameter.
+ - Must be used together with the match_set parameter.
+ - Uses the iptables set extension.
+ type: str
+ choices: [ "src", "dst", "src,dst", "dst,src" ]
+ version_added: "2.11"
+ limit:
+ description:
+ - Specifies the maximum average number of matches to allow per second.
+ - The number can specify units explicitly, using C(/second), C(/minute),
+ C(/hour) or C(/day), or parts of them (so C(5/second) is the same as
+ C(5/s)).
+ type: str
+ limit_burst:
+ description:
+ - Specifies the maximum burst before the above limit kicks in.
+ type: str
+ version_added: "2.1"
+ uid_owner:
+ description:
+ - Specifies the UID or username to use in match by owner rule.
+ - Since Ansible 2.6, when the C(!) argument is prepended, it inverts
+ the rule to apply instead to all users except the one specified.
+ type: str
+ version_added: "2.1"
+ gid_owner:
+ description:
+ - Specifies the GID or group to use in match by owner rule.
+ type: str
+ version_added: "2.9"
+ reject_with:
+ description:
+ - 'Specifies the error packet type to return while rejecting. It implies
+ "jump: REJECT".'
+ type: str
+ version_added: "2.1"
+ icmp_type:
+ description:
+ - This allows specification of the ICMP type, which can be a numeric
+ ICMP type, type/code pair, or one of the ICMP type names shown by the
+ command C(iptables -p icmp -h).
+ type: str
+ version_added: "2.2"
+ flush:
+ description:
+ - Flushes the specified table and chain of all rules.
+ - If no chain is specified then the entire table is purged.
+ - Ignores all other parameters.
+ type: bool
+ default: false
+ version_added: "2.2"
+ policy:
+ description:
+ - Set the policy for the chain to the given target.
+ - Only built-in chains can have policies.
+ - This parameter requires the C(chain) parameter.
+ - If you specify this parameter, all other parameters will be ignored.
+ - This parameter is used to set default policy for the given C(chain).
+ Do not confuse this with C(jump) parameter.
+ type: str
+ choices: [ ACCEPT, DROP, QUEUE, RETURN ]
+ version_added: "2.2"
+ wait:
+ description:
+ - Wait N seconds for the xtables lock to prevent multiple instances of
+ the program from running concurrently.
+ type: str
+ version_added: "2.10"
+ chain_management:
+ description:
+ - If C(true) and C(state) is C(present), the chain will be created if needed.
+ - If C(true) and C(state) is C(absent), the chain will be deleted if the only
+ other parameters passed are C(chain) and optionally C(table).
+ type: bool
+ default: false
+ version_added: "2.13"
+'''
+
+EXAMPLES = r'''
+- name: Block specific IP
+ ansible.builtin.iptables:
+ chain: INPUT
+ source: 8.8.8.8
+ jump: DROP
+ become: yes
+
+- name: Forward port 80 to 8600
+ ansible.builtin.iptables:
+ table: nat
+ chain: PREROUTING
+ in_interface: eth0
+ protocol: tcp
+ match: tcp
+ destination_port: 80
+ jump: REDIRECT
+ to_ports: 8600
+ comment: Redirect web traffic to port 8600
+ become: yes
+
+- name: Allow related and established connections
+ ansible.builtin.iptables:
+ chain: INPUT
+ ctstate: ESTABLISHED,RELATED
+ jump: ACCEPT
+ become: yes
+
+- name: Allow new incoming SYN packets on TCP port 22 (SSH)
+ ansible.builtin.iptables:
+ chain: INPUT
+ protocol: tcp
+ destination_port: 22
+ ctstate: NEW
+ syn: match
+ jump: ACCEPT
+ comment: Accept new SSH connections.
+
+- name: Match on IP ranges
+ ansible.builtin.iptables:
+ chain: FORWARD
+ src_range: 192.168.1.100-192.168.1.199
+ dst_range: 10.0.0.1-10.0.0.50
+ jump: ACCEPT
+
+- name: Allow source IPs defined in ipset "admin_hosts" on port 22
+ ansible.builtin.iptables:
+ chain: INPUT
+ match_set: admin_hosts
+ match_set_flags: src
+ destination_port: 22
+ jump: ACCEPT
+
+- name: Tag all outbound tcp packets with DSCP mark 8
+ ansible.builtin.iptables:
+ chain: OUTPUT
+ jump: DSCP
+ table: mangle
+ set_dscp_mark: 8
+ protocol: tcp
+
+- name: Tag all outbound tcp packets with DSCP DiffServ class CS1
+ ansible.builtin.iptables:
+ chain: OUTPUT
+ jump: DSCP
+ table: mangle
+ set_dscp_mark_class: CS1
+ protocol: tcp
+
+- name: Create the user-defined chain ALLOWLIST
+ ansible.builtin.iptables:
+ chain: ALLOWLIST
+ chain_management: true
+
+- name: Delete the user-defined chain ALLOWLIST
+ ansible.builtin.iptables:
+ chain: ALLOWLIST
+ chain_management: true
+ state: absent
+
+- name: Insert a rule on line 5
+ ansible.builtin.iptables:
+ chain: INPUT
+ protocol: tcp
+ destination_port: 8080
+ jump: ACCEPT
+ action: insert
+ rule_num: 5
+
+# Think twice before running the following task, as it may lock you out of the target system
+- name: Set the policy for the INPUT chain to DROP
+ ansible.builtin.iptables:
+ chain: INPUT
+ policy: DROP
+
+- name: Reject tcp with tcp-reset
+ ansible.builtin.iptables:
+ chain: INPUT
+ protocol: tcp
+ reject_with: tcp-reset
+ ip_version: ipv4
+
+- name: Set tcp flags
+ ansible.builtin.iptables:
+ chain: OUTPUT
+ jump: DROP
+ protocol: tcp
+ tcp_flags:
+ flags: ALL
+ flags_set:
+ - ACK
+ - RST
+ - SYN
+ - FIN
+
+- name: Iptables flush filter
+ ansible.builtin.iptables:
+ chain: "{{ item }}"
+ flush: yes
+ with_items: [ 'INPUT', 'FORWARD', 'OUTPUT' ]
+
+- name: Iptables flush nat
+ ansible.builtin.iptables:
+ table: nat
+ chain: '{{ item }}'
+ flush: yes
+ with_items: [ 'INPUT', 'OUTPUT', 'PREROUTING', 'POSTROUTING' ]
+
+- name: Log packets arriving into a user-defined chain
+ ansible.builtin.iptables:
+ chain: LOGGING
+ action: append
+ state: present
+ limit: 2/second
+ limit_burst: 20
+ log_prefix: "IPTABLES:INFO: "
+ log_level: info
+
+- name: Allow connections on multiple ports
+ ansible.builtin.iptables:
+ chain: INPUT
+ protocol: tcp
+ destination_ports:
+ - "80"
+ - "443"
+ - "8081:8083"
+ jump: ACCEPT
+'''
+
+import re
+
+from ansible.module_utils.compat.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+IPTABLES_WAIT_SUPPORT_ADDED = '1.4.20'
+
+IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED = '1.6.0'
+
+BINS = dict(
+ ipv4='iptables',
+ ipv6='ip6tables',
+)
+
+ICMP_TYPE_OPTIONS = dict(
+ ipv4='--icmp-type',
+ ipv6='--icmpv6-type',
+)
+
+
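+# The helpers below render one module parameter each into iptables CLI
+# tokens. A leading '!' in a value selects iptables' inverted-match syntax;
+# e.g. append_param(rule, '!tcp', '-p', False) extends rule with
+# ['!', '-p', 'tcp'].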
+def append_param(rule, param, flag, is_list):
+ if is_list:
+ for item in param:
+ append_param(rule, item, flag, False)
+ else:
+ if param is not None:
+ if param[0] == '!':
+ rule.extend(['!', flag, param[1:]])
+ else:
+ rule.extend([flag, param])
+
+
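+# --tcp-flags takes two comma-separated lists: the flags to examine and the
+# subset of them that must be set, e.g. '--tcp-flags SYN,ACK,RST SYN'.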
+def append_tcp_flags(rule, param, flag):
+ if param:
+ if 'flags' in param and 'flags_set' in param:
+ rule.extend([flag, ','.join(param['flags']), ','.join(param['flags_set'])])
+
+
+def append_match_flag(rule, param, flag, negatable):
+ if param == 'match':
+ rule.extend([flag])
+ elif negatable and param == 'negate':
+ rule.extend(['!', flag])
+
+
+def append_csv(rule, param, flag):
+ if param:
+ rule.extend([flag, ','.join(param)])
+
+
+def append_match(rule, param, match):
+ if param:
+ rule.extend(['-m', match])
+
+
+def append_jump(rule, param, jump):
+ if param:
+ rule.extend(['-j', jump])
+
+
+def append_wait(rule, param, flag):
+ if param:
+ rule.extend([flag, param])
+
+
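+# construct_rule flattens the module parameters into the token list that
+# follows the chain name on the iptables command line. For example,
+# protocol=tcp, jump=ACCEPT and destination_port=22 yield (in this order)
+# ['-p', 'tcp', '-j', 'ACCEPT', '--destination-port', '22'].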
+def construct_rule(params):
+ rule = []
+ append_wait(rule, params['wait'], '-w')
+ append_param(rule, params['protocol'], '-p', False)
+ append_param(rule, params['source'], '-s', False)
+ append_param(rule, params['destination'], '-d', False)
+ append_param(rule, params['match'], '-m', True)
+ append_tcp_flags(rule, params['tcp_flags'], '--tcp-flags')
+ append_param(rule, params['jump'], '-j', False)
+ if params.get('jump') and params['jump'].lower() == 'tee':
+ append_param(rule, params['gateway'], '--gateway', False)
+ append_param(rule, params['log_prefix'], '--log-prefix', False)
+ append_param(rule, params['log_level'], '--log-level', False)
+ append_param(rule, params['to_destination'], '--to-destination', False)
+ append_match(rule, params['destination_ports'], 'multiport')
+ append_csv(rule, params['destination_ports'], '--dports')
+ append_param(rule, params['to_source'], '--to-source', False)
+ append_param(rule, params['goto'], '-g', False)
+ append_param(rule, params['in_interface'], '-i', False)
+ append_param(rule, params['out_interface'], '-o', False)
+ append_param(rule, params['fragment'], '-f', False)
+ append_param(rule, params['set_counters'], '-c', False)
+ append_param(rule, params['source_port'], '--source-port', False)
+ append_param(rule, params['destination_port'], '--destination-port', False)
+ append_param(rule, params['to_ports'], '--to-ports', False)
+ append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
+ append_param(
+ rule,
+ params['set_dscp_mark_class'],
+ '--set-dscp-class',
+ False)
+ append_match_flag(rule, params['syn'], '--syn', True)
+ if 'conntrack' in params['match']:
+ append_csv(rule, params['ctstate'], '--ctstate')
+ elif 'state' in params['match']:
+ append_csv(rule, params['ctstate'], '--state')
+ elif params['ctstate']:
+ append_match(rule, params['ctstate'], 'conntrack')
+ append_csv(rule, params['ctstate'], '--ctstate')
+ if 'iprange' in params['match']:
+ append_param(rule, params['src_range'], '--src-range', False)
+ append_param(rule, params['dst_range'], '--dst-range', False)
+ elif params['src_range'] or params['dst_range']:
+ append_match(rule, params['src_range'] or params['dst_range'], 'iprange')
+ append_param(rule, params['src_range'], '--src-range', False)
+ append_param(rule, params['dst_range'], '--dst-range', False)
+ if 'set' in params['match']:
+ append_param(rule, params['match_set'], '--match-set', False)
+ append_match_flag(rule, 'match', params['match_set_flags'], False)
+ elif params['match_set']:
+ append_match(rule, params['match_set'], 'set')
+ append_param(rule, params['match_set'], '--match-set', False)
+ append_match_flag(rule, 'match', params['match_set_flags'], False)
+ append_match(rule, params['limit'] or params['limit_burst'], 'limit')
+ append_param(rule, params['limit'], '--limit', False)
+ append_param(rule, params['limit_burst'], '--limit-burst', False)
+ append_match(rule, params['uid_owner'], 'owner')
+ append_match_flag(rule, params['uid_owner'], '--uid-owner', True)
+ append_param(rule, params['uid_owner'], '--uid-owner', False)
+ append_match(rule, params['gid_owner'], 'owner')
+ append_match_flag(rule, params['gid_owner'], '--gid-owner', True)
+ append_param(rule, params['gid_owner'], '--gid-owner', False)
+ if params['jump'] is None:
+ append_jump(rule, params['reject_with'], 'REJECT')
+ append_param(rule, params['reject_with'], '--reject-with', False)
+ append_param(
+ rule,
+ params['icmp_type'],
+ ICMP_TYPE_OPTIONS[params['ip_version']],
+ False)
+ append_match(rule, params['comment'], 'comment')
+ append_param(rule, params['comment'], '--comment', False)
+ return rule
+
+
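+# push_arguments assembles the full command: binary path, table, action and
+# chain, plus the rule tokens from construct_rule() unless make_rule is
+# False. A presence check, for instance, runs something like
+# ['iptables', '-t', 'filter', '-C', 'INPUT', ...rule tokens...].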
+def push_arguments(iptables_path, action, params, make_rule=True):
+ cmd = [iptables_path]
+ cmd.extend(['-t', params['table']])
+ cmd.extend([action, params['chain']])
+ if action == '-I' and params['rule_num']:
+ cmd.extend([params['rule_num']])
+ if make_rule:
+ cmd.extend(construct_rule(params))
+ return cmd
+
+
+def check_rule_present(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-C', params)
+ rc, _, __ = module.run_command(cmd, check_rc=False)
+ return (rc == 0)
+
+
+def append_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-A', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def insert_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-I', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def remove_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-D', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def flush_table(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
+ module.run_command(cmd, check_rc=True)
+
+
+def set_chain_policy(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
+ cmd.append(params['policy'])
+ module.run_command(cmd, check_rc=True)
+
+
+def get_chain_policy(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-L', params, make_rule=False)
+ rc, out, _ = module.run_command(cmd, check_rc=True)
+ chain_header = out.split("\n")[0]
+ result = re.search(r'\(policy ([A-Z]+)\)', chain_header)
+ if result:
+ return result.group(1)
+ return None
+
+
+def get_iptables_version(iptables_path, module):
+ cmd = [iptables_path, '--version']
+ rc, out, _ = module.run_command(cmd, check_rc=True)
+ return out.split('v')[1].rstrip('\n')
+
+
+def create_chain(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-N', params, make_rule=False)
+ module.run_command(cmd, check_rc=True)
+
+
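+# 'iptables -L <chain>' exits non-zero when the chain does not exist, so the
+# return code alone is enough to detect chain presence.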
+def check_chain_present(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-L', params, make_rule=False)
+ rc, _, __ = module.run_command(cmd, check_rc=False)
+ return (rc == 0)
+
+
+def delete_chain(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-X', params, make_rule=False)
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ action=dict(type='str', default='append', choices=['append', 'insert']),
+ ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
+ chain=dict(type='str'),
+ rule_num=dict(type='str'),
+ protocol=dict(type='str'),
+ wait=dict(type='str'),
+ source=dict(type='str'),
+ to_source=dict(type='str'),
+ destination=dict(type='str'),
+ to_destination=dict(type='str'),
+ match=dict(type='list', elements='str', default=[]),
+ tcp_flags=dict(type='dict',
+ options=dict(
+ flags=dict(type='list', elements='str'),
+ flags_set=dict(type='list', elements='str'))
+ ),
+ jump=dict(type='str'),
+ gateway=dict(type='str'),
+ log_prefix=dict(type='str'),
+ log_level=dict(type='str',
+ choices=['0', '1', '2', '3', '4', '5', '6', '7',
+ 'emerg', 'alert', 'crit', 'error',
+ 'warning', 'notice', 'info', 'debug'],
+ default=None,
+ ),
+ goto=dict(type='str'),
+ in_interface=dict(type='str'),
+ out_interface=dict(type='str'),
+ fragment=dict(type='str'),
+ set_counters=dict(type='str'),
+ source_port=dict(type='str'),
+ destination_port=dict(type='str'),
+ destination_ports=dict(type='list', elements='str', default=[]),
+ to_ports=dict(type='str'),
+ set_dscp_mark=dict(type='str'),
+ set_dscp_mark_class=dict(type='str'),
+ comment=dict(type='str'),
+ ctstate=dict(type='list', elements='str', default=[]),
+ src_range=dict(type='str'),
+ dst_range=dict(type='str'),
+ match_set=dict(type='str'),
+ match_set_flags=dict(type='str', choices=['src', 'dst', 'src,dst', 'dst,src']),
+ limit=dict(type='str'),
+ limit_burst=dict(type='str'),
+ uid_owner=dict(type='str'),
+ gid_owner=dict(type='str'),
+ reject_with=dict(type='str'),
+ icmp_type=dict(type='str'),
+ syn=dict(type='str', default='ignore', choices=['ignore', 'match', 'negate']),
+ flush=dict(type='bool', default=False),
+ policy=dict(type='str', choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
+ chain_management=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=(
+ ['set_dscp_mark', 'set_dscp_mark_class'],
+ ['flush', 'policy'],
+ ),
+ required_if=[
+ ['jump', 'TEE', ['gateway']],
+ ['jump', 'tee', ['gateway']],
+ ]
+ )
+ args = dict(
+ changed=False,
+ failed=False,
+ ip_version=module.params['ip_version'],
+ table=module.params['table'],
+ chain=module.params['chain'],
+ flush=module.params['flush'],
+ rule=' '.join(construct_rule(module.params)),
+ state=module.params['state'],
+ chain_management=module.params['chain_management'],
+ )
+
+ ip_version = module.params['ip_version']
+ iptables_path = module.get_bin_path(BINS[ip_version], True)
+
+ # Check if chain option is required
+ if args['flush'] is False and args['chain'] is None:
+ module.fail_json(msg="Either chain or flush parameter must be specified.")
+
+ if module.params.get('log_prefix', None) or module.params.get('log_level', None):
+ if module.params['jump'] is None:
+ module.params['jump'] = 'LOG'
+ elif module.params['jump'] != 'LOG':
+ module.fail_json(msg="Logging options can only be used with the LOG jump target.")
+
+ # Check if wait option is supported
+ iptables_version = LooseVersion(get_iptables_version(iptables_path, module))
+
+ if iptables_version >= LooseVersion(IPTABLES_WAIT_SUPPORT_ADDED):
+ if iptables_version < LooseVersion(IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED):
+ module.params['wait'] = ''
+ else:
+ module.params['wait'] = None
+
+ # Flush the table
+ if args['flush'] is True:
+ args['changed'] = True
+ if not module.check_mode:
+ flush_table(iptables_path, module, module.params)
+
+ # Set the policy
+ elif module.params['policy']:
+ current_policy = get_chain_policy(iptables_path, module, module.params)
+ if not current_policy:
+ module.fail_json(msg='Can\'t detect current policy')
+
+ changed = current_policy != module.params['policy']
+ args['changed'] = changed
+ if changed and not module.check_mode:
+ set_chain_policy(iptables_path, module, module.params)
+
+ # Delete the chain if there is no rule in the arguments
+ elif (args['state'] == 'absent') and not args['rule']:
+ chain_is_present = check_chain_present(
+ iptables_path, module, module.params
+ )
+ args['changed'] = chain_is_present
+
+ if (chain_is_present and args['chain_management'] and not module.check_mode):
+ delete_chain(iptables_path, module, module.params)
+
+ else:
+ insert = (module.params['action'] == 'insert')
+ rule_is_present = check_rule_present(
+ iptables_path, module, module.params
+ )
+ chain_is_present = rule_is_present or check_chain_present(
+ iptables_path, module, module.params
+ )
+ should_be_present = (args['state'] == 'present')
+
+ # Check if target is up to date
+ args['changed'] = (rule_is_present != should_be_present)
+ if args['changed'] is False:
+ # Target is already up to date
+ module.exit_json(**args)
+
+ # Check only; don't modify
+ if not module.check_mode:
+ if should_be_present:
+ if not chain_is_present and args['chain_management']:
+ create_chain(iptables_path, module, module.params)
+
+ if insert:
+ insert_rule(iptables_path, module, module.params)
+ else:
+ append_rule(iptables_path, module, module.params)
+ else:
+ remove_rule(iptables_path, module, module.params)
+
+ module.exit_json(**args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/known_hosts.py b/lib/ansible/modules/known_hosts.py
new file mode 100644
index 0000000..b0c8888
--- /dev/null
+++ b/lib/ansible/modules/known_hosts.py
@@ -0,0 +1,365 @@
+
+# Copyright: (c) 2014, Matthew Vernon <mcv21@cam.ac.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: known_hosts
+short_description: Add or remove a host from the C(known_hosts) file
+description:
+ - The C(known_hosts) module lets you add or remove host keys from the C(known_hosts) file.
+ - As of Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh.
+ This is useful if you want to use the M(ansible.builtin.git) module over ssh, for example.
+ - If you have a very large number of host keys to manage, you will find the M(ansible.builtin.template) module more useful.
+version_added: "1.9"
+options:
+ name:
+ aliases: [ 'host' ]
+ description:
+ - The host to add or remove (must match a host specified in key). It will be converted to lowercase so that ssh-keygen can find it.
+ - Must match the <hostname> or <ip> present in the key attribute.
+ - For custom SSH port, C(name) needs to specify port as well. See example section.
+ type: str
+ required: true
+ key:
+ description:
+ - The SSH public host key, as a string.
+ - Required if C(state=present), optional when C(state=absent), in which case all keys for the host are removed.
+ - The key must be in the right format for SSH (see sshd(8), section "SSH_KNOWN_HOSTS FILE FORMAT").
+ - Specifically, the key should not match the format that is found in an SSH pubkey file, but should rather have the hostname prepended to a
+ line that includes the pubkey, the same way that it would appear in the known_hosts file. The value prepended to the line must also match
+ the value of the name parameter.
+ - Should be of format C(<hostname[,IP]> ssh-rsa <pubkey>).
+ - For custom SSH port, C(key) needs to specify port as well. See example section.
+ type: str
+ path:
+ description:
+ - The known_hosts file to edit.
+ - The known_hosts file will be created if needed. The rest of the path must exist prior to running the module.
+ default: "~/.ssh/known_hosts"
+ type: path
+ hash_host:
+ description:
+ - Hash the hostname in the known_hosts file.
+ type: bool
+ default: "no"
+ version_added: "2.3"
+ state:
+ description:
+ - I(present) to add the host key.
+ - I(absent) to remove it.
+ choices: [ "absent", "present" ]
+ default: "present"
+ type: str
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+extends_documentation_fragment:
+ - action_common_attributes
+author:
+- Matthew Vernon (@mcv21)
+'''
+
+EXAMPLES = r'''
+- name: Tell the host about our servers it might want to ssh to
+ ansible.builtin.known_hosts:
+ path: /etc/ssh/ssh_known_hosts
+ name: foo.com.invalid
+ key: "{{ lookup('ansible.builtin.file', 'pubkeys/foo.com.invalid') }}"
+
+- name: Another way to call known_hosts
+ ansible.builtin.known_hosts:
+ name: host1.example.com # or 10.9.8.77
+ key: host1.example.com,10.9.8.77 ssh-rsa ASDeararAIUHI324324 # some key gibberish
+ path: /etc/ssh/ssh_known_hosts
+ state: present
+
+- name: Add host with custom SSH port
+ ansible.builtin.known_hosts:
+ name: '[host1.example.com]:2222'
+ key: '[host1.example.com]:2222 ssh-rsa ASDeararAIUHI324324' # some key gibberish
+ path: /etc/ssh/ssh_known_hosts
+ state: present
+'''
+
+# Makes sure public host keys are present or absent in the given known_hosts
+# file.
+#
+# Arguments
+# =========
+# name = hostname whose key should be added (alias: host)
+# key = line(s) to add to known_hosts file
+# path = the known_hosts file to edit (default: ~/.ssh/known_hosts)
+# hash_host = yes|no (default: no) hash the hostname in the known_hosts file
+# state = absent|present (default: present)
+
+import base64
+import errno
+import hashlib
+import hmac
+import os
+import os.path
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def enforce_state(module, params):
+ """
+ Add or remove key.
+ """
+
+ host = params["name"].lower()
+ key = params.get("key", None)
+ path = params.get("path")
+ hash_host = params.get("hash_host")
+ state = params.get("state")
+ # Find the ssh-keygen binary
+ sshkeygen = module.get_bin_path("ssh-keygen", True)
+
+ if not key and state != "absent":
+ module.fail_json(msg="No key specified when adding a host")
+
+ if key and hash_host:
+ key = hash_host_key(host, key)
+
+ # Trailing newline in files gets lost, so re-add if necessary
+ if key and not key.endswith('\n'):
+ key += '\n'
+
+ sanity_check(module, host, key, sshkeygen)
+
+ found, replace_or_add, found_line = search_for_host_key(module, host, key, path, sshkeygen)
+
+ params['diff'] = compute_diff(path, found_line, replace_or_add, state, key)
+
+ # check if we are trying to remove a non matching key,
+ # in that case return with no change to the host
+ if state == 'absent' and not found_line and key:
+ params['changed'] = False
+ return params
+
+ # We will change state if found==True and state!="present",
+ # or found==False and state=="present",
+ # i.e. found XOR (state=="present").
+ # Alternatively, if replace_or_add is true (i.e. the key is present and we
+ # must change it).
+ if module.check_mode:
+ module.exit_json(changed=replace_or_add or (state == "present") != found,
+ diff=params['diff'])
+
+ # Now do the work.
+
+ # Only remove whole host if found and no key provided
+ if found and not key and state == "absent":
+ module.run_command([sshkeygen, '-R', host, '-f', path], check_rc=True)
+ params['changed'] = True
+
+ # Next, add a new (or replacing) entry
+ if replace_or_add or found != (state == "present"):
+ try:
+ inf = open(path, "r")
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ inf = None
+ else:
+ module.fail_json(msg="Failed to read %s: %s" % (path, str(e)))
+ try:
+ with tempfile.NamedTemporaryFile(mode='w+', dir=os.path.dirname(path), delete=False) as outf:
+ if inf is not None:
+ for line_number, line in enumerate(inf):
+ if found_line == (line_number + 1) and (replace_or_add or state == 'absent'):
+ continue # skip this line to replace its key
+ outf.write(line)
+ inf.close()
+ if state == 'present':
+ outf.write(key)
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Failed to write to file %s: %s" % (path, to_native(e)))
+ else:
+ module.atomic_move(outf.name, path)
+
+ params['changed'] = True
+
+ return params
+
+
+def sanity_check(module, host, key, sshkeygen):
+ '''Check supplied key is sensible
+
+ host and key are parameters provided by the user; If the host
+ provided is inconsistent with the key supplied, then this function
+ quits, providing an error to the user.
+ sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
+ '''
+ # If no key supplied, we're doing a removal, and have nothing to check here.
+ if not key:
+ return
+ # Rather than parsing the key ourselves, get ssh-keygen to do it
+ # (this is essential for hashed keys, but otherwise useful, as the
+ # key question is whether ssh-keygen thinks the key matches the host).
+
+ # The approach is to write the key to a temporary file,
+ # and then attempt to look up the specified host in that file.
+
+ if re.search(r'\S+(\s+)?,(\s+)?', host):
+ module.fail_json(msg="Comma separated list of names is not supported. "
+ "Please pass a single name to lookup in the known_hosts file.")
+
+ with tempfile.NamedTemporaryFile(mode='w+') as outf:
+ try:
+ outf.write(key)
+ outf.flush()
+ except IOError as e:
+ module.fail_json(msg="Failed to write to temporary file %s: %s" %
+ (outf.name, to_native(e)))
+
+ sshkeygen_command = [sshkeygen, '-F', host, '-f', outf.name]
+ rc, stdout, stderr = module.run_command(sshkeygen_command)
+
+ if stdout == '': # host not found
+ module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
+
+
+def search_for_host_key(module, host, key, path, sshkeygen):
+ '''search_for_host_key(module,host,key,path,sshkeygen) -> (found,replace_or_add,found_line)
+
+ Looks up host and keytype in the known_hosts file path; if it's there, looks to see
+ if one of those entries matches key. Returns:
+ found (Boolean): is host found in path?
+ replace_or_add (Boolean): is the key in path different to that supplied by user?
+ found_line (int or None): the line where a key of the same type was found
+ if found=False, then replace is always False.
+ sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
+ '''
+ if os.path.exists(path) is False:
+ return False, False, None
+
+ sshkeygen_command = [sshkeygen, '-F', host, '-f', path]
+
+ # openssh >=6.4 has changed ssh-keygen behaviour such that it returns
+ # 1 if no host is found, whereas previously it returned 0
+ rc, stdout, stderr = module.run_command(sshkeygen_command, check_rc=False)
+ if stdout == '' and stderr == '' and (rc == 0 or rc == 1):
+ return False, False, None # host not found, no other errors
+ if rc != 0: # something went wrong
+ module.fail_json(msg="ssh-keygen failed (rc=%d, stdout='%s',stderr='%s')" % (rc, stdout, stderr))
+
+ # If user supplied no key, we don't want to try and replace anything with it
+ if not key:
+ return True, False, None
+
+ lines = stdout.split('\n')
+ new_key = normalize_known_hosts_key(key)
+
+ for lnum, l in enumerate(lines):
+ if l == '':
+ continue
+ elif l[0] == '#': # info output from ssh-keygen; contains the line number where key was found
+ try:
+ # This output format has been hardcoded in ssh-keygen since at least OpenSSH 4.0
+ # It always outputs the non-localized comment before the found key
+ found_line = int(re.search(r'found: line (\d+)', l).group(1))
+ except (IndexError, AttributeError):
+ module.fail_json(msg="failed to parse output of ssh-keygen for line number: '%s'" % l)
+ else:
+ found_key = normalize_known_hosts_key(l)
+ if new_key['host'][:3] == '|1|' and found_key['host'][:3] == '|1|': # do not change host hash if already hashed
+ new_key['host'] = found_key['host']
+ if new_key == found_key: # found a match
+ return True, False, found_line # found exactly the same key, don't replace
+ elif new_key['type'] == found_key['type']: # found a different key for the same key type
+ return True, True, found_line
+
+ # No match found, return found and replace, but no line
+ return True, True, None
+
+
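+# OpenSSH's HashKnownHosts format stores the host field as
+# |1|base64(salt)|base64(HMAC-SHA1(salt, hostname)); hash_host_key rewrites
+# the host part of the supplied key line into that form, preserving an
+# optional leading marker such as @cert-authority.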
+def hash_host_key(host, key):
+ hmac_key = os.urandom(20)
+ hashed_host = hmac.new(hmac_key, to_bytes(host), hashlib.sha1).digest()
+ parts = key.strip().split()
+ # @ indicates the optional marker field used for @cert-authority or @revoked
+ i = 1 if parts[0][0] == '@' else 0
+ parts[i] = '|1|%s|%s' % (to_native(base64.b64encode(hmac_key)), to_native(base64.b64encode(hashed_host)))
+ return ' '.join(parts)
+
+
+def normalize_known_hosts_key(key):
+ '''
+ Transform a key, either taken from a known_host file or provided by the
+ user, into a normalized form.
+ The host part (which might include multiple hostnames or be hashed) gets
+ replaced by the provided host. Also, any spurious information gets removed
+ from the end (like the username@host tag usually present in hostkeys, but
+ absent in known_hosts files)
+ '''
+ key = key.strip() # trim trailing newline
+ k = key.split()
+ d = dict()
+ # The optional "marker" field, used for @cert-authority or @revoked
+ if k[0][0] == '@':
+ d['options'] = k[0]
+ d['host'] = k[1]
+ d['type'] = k[2]
+ d['key'] = k[3]
+ else:
+ d['host'] = k[0]
+ d['type'] = k[1]
+ d['key'] = k[2]
+ return d
+
+
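+# compute_diff builds the before/after texts for --diff mode purely in
+# memory: it drops the matched line and/or appends the new key without
+# touching the file itself.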
+def compute_diff(path, found_line, replace_or_add, state, key):
+ diff = {
+ 'before_header': path,
+ 'after_header': path,
+ 'before': '',
+ 'after': '',
+ }
+ try:
+ inf = open(path, "r")
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ diff['before_header'] = '/dev/null'
+ else:
+ diff['before'] = inf.read()
+ inf.close()
+ lines = diff['before'].splitlines(1)
+ if (replace_or_add or state == 'absent') and found_line is not None and 1 <= found_line <= len(lines):
+ del lines[found_line - 1]
+ if state == 'present' and (replace_or_add or found_line is None):
+ lines.append(key)
+ diff['after'] = ''.join(lines)
+ return diff
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str', aliases=['host']),
+ key=dict(required=False, type='str', no_log=False),
+ path=dict(default="~/.ssh/known_hosts", type='path'),
+ hash_host=dict(required=False, type='bool', default=False),
+ state=dict(default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True
+ )
+
+ results = enforce_state(module, module.params)
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/lineinfile.py b/lib/ansible/modules/lineinfile.py
new file mode 100644
index 0000000..0e1b76f
--- /dev/null
+++ b/lib/ansible/modules/lineinfile.py
@@ -0,0 +1,638 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: lineinfile
+short_description: Manage lines in text files
+description:
+ - This module ensures a particular line is in a file, or replaces an
+ existing line using a back-referenced regular expression.
+ - This is primarily useful when you want to change only a single line in a file.
+ - See the M(ansible.builtin.replace) module if you want to change multiple, similar lines
+ or check M(ansible.builtin.blockinfile) if you want to insert/update/remove a block of lines in a file.
+ For other cases, see the M(ansible.builtin.copy) or M(ansible.builtin.template) modules.
+version_added: "0.7"
+options:
+ path:
+ description:
+ - The file to modify.
+ - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
+ type: path
+ required: true
+ aliases: [ dest, destfile, name ]
+ regexp:
+ description:
+ - The regular expression to look for in every line of the file.
+ - For C(state=present), the pattern to replace if found. Only the last line found will be replaced.
+ - For C(state=absent), the pattern of the line(s) to remove.
+ - If the regular expression is not matched, the line will be
+ added to the file in keeping with C(insertbefore) or C(insertafter)
+ settings.
+ - When modifying a line the regexp should typically match both the initial state of
+ the line as well as its state after replacement by C(line) to ensure idempotence.
+ - Uses Python regular expressions. See U(https://docs.python.org/3/library/re.html).
+ type: str
+ aliases: [ regex ]
+ version_added: '1.7'
+ search_string:
+ description:
+ - The literal string to look for in every line of the file. This does not have to match the entire line.
+ - For C(state=present), the line to replace if the string is found in the file. Only the last line found will be replaced.
+ - For C(state=absent), the line(s) to remove if the string is in the line.
+ - If the literal expression is not matched, the line will be
+ added to the file in keeping with C(insertbefore) or C(insertafter)
+ settings.
+ - Mutually exclusive with C(backrefs) and C(regexp).
+ type: str
+ version_added: '2.11'
+ state:
+ description:
+ - Whether the line should be there or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ line:
+ description:
+ - The line to insert/replace into the file.
+ - Required for C(state=present).
+ - If C(backrefs) is set, may contain backreferences that will get
+ expanded with the C(regexp) capture groups if the regexp matches.
+ type: str
+ aliases: [ value ]
+ backrefs:
+ description:
+ - Used with C(state=present).
+ - If set, C(line) can contain backreferences (both positional and named)
+ that will get populated if the C(regexp) matches.
+ - This parameter changes the operation of the module slightly;
+ C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
+ does not match anywhere in the file, the file will be left unchanged.
+ - If the C(regexp) does match, the last matching line will be replaced by
+ the expanded line parameter.
+ - Mutually exclusive with C(search_string).
+ type: bool
+ default: no
+ version_added: "1.1"
+ insertafter:
+ description:
+ - Used with C(state=present).
+ - If specified, the line will be inserted after the last match of specified regular expression.
+ - If the first match is required, use C(firstmatch=yes).
+ - A special value is available; C(EOF) for inserting the line at the end of the file.
+ - If specified regular expression has no matches, EOF will be used instead.
+ - If C(insertbefore) is set, default value C(EOF) will be ignored.
+ - If regular expressions are passed to both C(regexp) and C(insertafter), C(insertafter) is only honored if no match for C(regexp) is found.
+ - May not be used with C(backrefs) or C(insertbefore).
+ type: str
+ choices: [ EOF, '*regex*' ]
+ default: EOF
+ insertbefore:
+ description:
+ - Used with C(state=present).
+ - If specified, the line will be inserted before the last match of specified regular expression.
+ - If the first match is required, use C(firstmatch=yes).
+ - A special value is available; C(BOF) for inserting the line at the beginning of the file.
+ - If specified regular expression has no matches, the line will be inserted at the end of the file.
+ - If regular expressions are passed to both C(regexp) and C(insertbefore), C(insertbefore) is only honored if no match for C(regexp) is found.
+ - May not be used with C(backrefs) or C(insertafter).
+ type: str
+ choices: [ BOF, '*regex*' ]
+ version_added: "1.1"
+ create:
+ description:
+ - Used with C(state=present).
+ - If specified, the file will be created if it does not already exist.
+ - By default it will fail if the file is missing.
+ type: bool
+ default: no
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ firstmatch:
+ description:
+ - Used with C(insertafter) or C(insertbefore).
+ - If set, C(insertafter) and C(insertbefore) will work with the first line that matches the given regular expression.
+ type: bool
+ default: no
+ version_added: "2.5"
+ others:
+ description:
+ - All arguments accepted by the M(ansible.builtin.file) module also work here.
+ type: str
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.files
+ - files
+ - validate
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: full
+ vault:
+ support: none
+notes:
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+seealso:
+- module: ansible.builtin.blockinfile
+- module: ansible.builtin.copy
+- module: ansible.builtin.file
+- module: ansible.builtin.replace
+- module: ansible.builtin.template
+- module: community.windows.win_lineinfile
+author:
+ - Daniel Hokka Zakrisson (@dhozac)
+ - Ahti Kitsik (@ahtik)
+ - Jose Angel Munoz (@imjoseangel)
+'''
+
+EXAMPLES = r'''
+# NOTE: Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
+- name: Ensure SELinux is set to enforcing mode
+ ansible.builtin.lineinfile:
+ path: /etc/selinux/config
+ regexp: '^SELINUX='
+ line: SELINUX=enforcing
+
+- name: Make sure group wheel is not in the sudoers configuration
+ ansible.builtin.lineinfile:
+ path: /etc/sudoers
+ state: absent
+ regexp: '^%wheel'
+
+- name: Replace a localhost entry with our own
+ ansible.builtin.lineinfile:
+ path: /etc/hosts
+ regexp: '^127\.0\.0\.1'
+ line: 127.0.0.1 localhost
+ owner: root
+ group: root
+ mode: '0644'
+
+- name: Replace a localhost entry searching for a literal string to avoid escaping
+ ansible.builtin.lineinfile:
+ path: /etc/hosts
+ search_string: '127.0.0.1'
+ line: 127.0.0.1 localhost
+ owner: root
+ group: root
+ mode: '0644'
+
+- name: Ensure the default Apache port is 8080
+ ansible.builtin.lineinfile:
+ path: /etc/httpd/conf/httpd.conf
+ regexp: '^Listen '
+ insertafter: '^#Listen '
+ line: Listen 8080
+
+- name: Ensure php extension matches new pattern
+ ansible.builtin.lineinfile:
+ path: /etc/httpd/conf/httpd.conf
+ search_string: '<FilesMatch ".php[45]?$">'
+ insertafter: '^\t<Location \/>\n'
+ line: ' <FilesMatch ".php[34]?$">'
+
+- name: Ensure we have our own comment added to /etc/services
+ ansible.builtin.lineinfile:
+ path: /etc/services
+ regexp: '^# port for http'
+ insertbefore: '^www.*80/tcp'
+ line: '# port for http by default'
+
+- name: Add a line to a file if the file does not exist, without passing regexp
+ ansible.builtin.lineinfile:
+ path: /tmp/testfile
+ line: 192.168.1.99 foo.lab.net foo
+ create: yes
+
+# NOTE: Yaml requires escaping backslashes in double quotes but not in single quotes
+- name: Ensure the JBoss memory settings are exactly as needed
+ ansible.builtin.lineinfile:
+ path: /opt/jboss-as/bin/standalone.conf
+ regexp: '^(.*)Xms(\d+)m(.*)$'
+ line: '\1Xms${xms}m\3'
+ backrefs: yes
+
+# NOTE: Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
+- name: Validate the sudoers file before saving
+ ansible.builtin.lineinfile:
+ path: /etc/sudoers
+ state: present
+ regexp: '^%ADMIN ALL='
+ line: '%ADMIN ALL=(ALL) NOPASSWD: ALL'
+ validate: /usr/sbin/visudo -cf %s
+
+# See https://docs.python.org/3/library/re.html for further details on syntax
+- name: Use backrefs with alternative group syntax to avoid conflicts with variable values
+ ansible.builtin.lineinfile:
+ path: /tmp/config
+ regexp: ^(host=).*
+ line: \g<1>{{ hostname }}
+ backrefs: yes
+'''
+
+RETURN = r'''#'''
+
+import os
+import re
+import tempfile
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
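+# write_changes writes the new content to a temporary file, optionally runs
+# the user-supplied validate command against it (%s is replaced with the
+# temporary path), and only moves it over the destination if validation
+# succeeds.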
+def write_changes(module, b_lines, dest):
+
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ with os.fdopen(tmpfd, 'wb') as f:
+ f.writelines(b_lines)
+
+ validate = module.params.get('validate', None)
+ valid = not validate
+ if validate:
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict'))
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc, err))
+ if valid:
+ module.atomic_move(tmpfile,
+ to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'),
+ unsafe_writes=module.params['unsafe_writes'])
+
+
+def check_file_attrs(module, changed, message, diff):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False, diff=diff):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
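+# present() implements state=present: first look for a line to replace
+# (regexp, search_string or an exact line match), and only if nothing
+# matched, work out where to insert the new line (insertafter/insertbefore,
+# EOF or BOF).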
+def present(module, dest, regexp, search_string, line, insertafter, insertbefore, create,
+ backup, backrefs, firstmatch):
+
+ diff = {'before': '',
+ 'after': '',
+ 'before_header': '%s (content)' % dest,
+ 'after_header': '%s (content)' % dest}
+
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if not os.path.exists(b_dest):
+ if not create:
+ module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
+ b_destpath = os.path.dirname(b_dest)
+ if b_destpath and not os.path.exists(b_destpath) and not module.check_mode:
+ try:
+ os.makedirs(b_destpath)
+ except Exception as e:
+ module.fail_json(msg='Error creating %s (%s)' % (to_text(b_destpath), to_text(e)))
+
+ b_lines = []
+ else:
+ with open(b_dest, 'rb') as f:
+ b_lines = f.readlines()
+
+ if module._diff:
+ diff['before'] = to_native(b''.join(b_lines))
+
+ if regexp is not None:
+ bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
+
+ if insertafter not in (None, 'BOF', 'EOF'):
+ bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
+ elif insertbefore not in (None, 'BOF'):
+ bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
+ else:
+ bre_ins = None
+
+ # index[0] is the line num where regexp has been found
+ # index[1] is the line num where insertafter/insertbefore has been found
+ index = [-1, -1]
+ match = None
+ exact_line_match = False
+ b_line = to_bytes(line, errors='surrogate_or_strict')
+
+ # The module's doc says
+ # "If regular expressions are passed to both regexp and
+ # insertafter, insertafter is only honored if no match for regexp is found."
+ # Therefore:
+ # 1. regexp or search_string was found -> ignore insertafter, replace the matched line
+ # 2. regexp or search_string was not found -> insert the line after 'insertafter' or 'insertbefore' line
+
+ # Given the above:
+ # 1. First, look for a match of regexp:
+ if regexp is not None:
+ for lineno, b_cur_line in enumerate(b_lines):
+ match_found = bre_m.search(b_cur_line)
+ if match_found:
+ index[0] = lineno
+ match = match_found
+ if firstmatch:
+ break
+
+ # 2. Then look for a match of search_string:
+ if search_string is not None:
+ for lineno, b_cur_line in enumerate(b_lines):
+ match_found = to_bytes(search_string, errors='surrogate_or_strict') in b_cur_line
+ if match_found:
+ index[0] = lineno
+ match = match_found
+ if firstmatch:
+ break
+
+ # 3. When no match was found in the previous steps,
+ # search for the insertafter/insertbefore position:
+ if not match:
+ for lineno, b_cur_line in enumerate(b_lines):
+ if b_line == b_cur_line.rstrip(b'\r\n'):
+ index[0] = lineno
+ exact_line_match = True
+
+ elif bre_ins is not None and bre_ins.search(b_cur_line):
+ if insertafter:
+ # + 1 for the next line
+ index[1] = lineno + 1
+ if firstmatch:
+ break
+
+ if insertbefore:
+ # index[1] for the previous line
+ index[1] = lineno
+ if firstmatch:
+ break
+
+ msg = ''
+ changed = False
+ b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict')
+ # Exact line or Regexp matched a line in the file
+ if index[0] != -1:
+ if backrefs and match:
+ b_new_line = match.expand(b_line)
+ else:
+ # Don't do backref expansion if not asked.
+ b_new_line = b_line
+
+ if not b_new_line.endswith(b_linesep):
+ b_new_line += b_linesep
+
+ # If no regexp or search_string was given and no line match is found anywhere in the file,
+ # insert the line appropriately if using insertbefore or insertafter
+ if (regexp, search_string, match) == (None, None, None) and not exact_line_match:
+
+ # Insert lines
+ if insertafter and insertafter != 'EOF':
+ # Ensure there is a line separator after the found string
+ # at the end of the file.
+ if b_lines and b_lines[-1][-1:] not in (b'\n', b'\r'):
+ b_lines[-1] = b_lines[-1] + b_linesep
+
+ # If the line to insert after is at the end of the file
+ # use the appropriate index value.
+ if len(b_lines) == index[1]:
+ if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
+ b_lines.append(b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+ elif b_lines[index[1]].rstrip(b'\r\n') != b_line:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif insertbefore and insertbefore != 'BOF':
+ # If the line to insert before is at the beginning of the file
+ # use the appropriate index value.
+ if index[1] <= 0:
+ if b_lines[index[1]].rstrip(b'\r\n') != b_line:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif b_lines[index[0]] != b_new_line:
+ b_lines[index[0]] = b_new_line
+ msg = 'line replaced'
+ changed = True
+
+ elif backrefs:
+ # Do absolutely nothing, since it's not safe generating the line
+ # without the regexp matching to populate the backrefs.
+ pass
+ # Add it to the beginning of the file
+ elif insertbefore == 'BOF' or insertafter == 'BOF':
+ b_lines.insert(0, b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+ # Add it to the end of the file if requested or
+ # if insertafter/insertbefore didn't match anything
+ # (so default behaviour is to add at the end)
+ elif insertafter == 'EOF' or index[1] == -1:
+
+ # If the file is not empty then ensure there's a newline before the added line
+ if b_lines and b_lines[-1][-1:] not in (b'\n', b'\r'):
+ b_lines.append(b_linesep)
+
+ b_lines.append(b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif insertafter and index[1] != -1:
+
+ # Don't insert the line if it already matches at the index.
+ # If the line to insert after is at the end of the file use the appropriate index value.
+ if len(b_lines) == index[1]:
+ if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
+ b_lines.append(b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+ elif b_line != b_lines[index[1]].rstrip(b'\n\r'):
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ # insert matched, but not the regexp or search_string
+ else:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ if module._diff:
+ diff['after'] = to_native(b''.join(b_lines))
+
+ backupdest = ""
+ if changed and not module.check_mode:
+ if backup and os.path.exists(b_dest):
+ backupdest = module.backup_local(dest)
+ write_changes(module, b_lines, dest)
+
+ if module.check_mode and not os.path.exists(b_dest):
+ module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
+
+ attr_diff = {}
+ msg, changed = check_file_attrs(module, changed, msg, attr_diff)
+
+ attr_diff['before_header'] = '%s (file attributes)' % dest
+ attr_diff['after_header'] = '%s (file attributes)' % dest
+
+ difflist = [diff, attr_diff]
+ module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
+
+
+def absent(module, dest, regexp, search_string, line, backup):
+
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if not os.path.exists(b_dest):
+ module.exit_json(changed=False, msg="file not present")
+
+ msg = ''
+ diff = {'before': '',
+ 'after': '',
+ 'before_header': '%s (content)' % dest,
+ 'after_header': '%s (content)' % dest}
+
+ with open(b_dest, 'rb') as f:
+ b_lines = f.readlines()
+
+ if module._diff:
+ diff['before'] = to_native(b''.join(b_lines))
+
+ if regexp is not None:
+ bre_c = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
+ found = []
+
+ b_line = to_bytes(line, errors='surrogate_or_strict')
+
+ def matcher(b_cur_line):
+ if regexp is not None:
+ match_found = bre_c.search(b_cur_line)
+ elif search_string is not None:
+ match_found = to_bytes(search_string, errors='surrogate_or_strict') in b_cur_line
+ else:
+ match_found = b_line == b_cur_line.rstrip(b'\r\n')
+ if match_found:
+ found.append(b_cur_line)
+ return not match_found
+
+ b_lines = [l for l in b_lines if matcher(l)]
+ changed = len(found) > 0
+
+ if module._diff:
+ diff['after'] = to_native(b''.join(b_lines))
+
+ backupdest = ""
+ if changed and not module.check_mode:
+ if backup:
+ backupdest = module.backup_local(dest)
+ write_changes(module, b_lines, dest)
+
+ if changed:
+ msg = "%s line(s) removed" % len(found)
+
+ attr_diff = {}
+ msg, changed = check_file_attrs(module, changed, msg, attr_diff)
+
+ attr_diff['before_header'] = '%s (file attributes)' % dest
+ attr_diff['after_header'] = '%s (file attributes)' % dest
+
+ difflist = [diff, attr_diff]
+
+ module.exit_json(changed=changed, found=len(found), msg=msg, backup=backupdest, diff=difflist)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ regexp=dict(type='str', aliases=['regex']),
+ search_string=dict(type='str'),
+ line=dict(type='str', aliases=['value']),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ backrefs=dict(type='bool', default=False),
+ create=dict(type='bool', default=False),
+ backup=dict(type='bool', default=False),
+ firstmatch=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ ),
+ mutually_exclusive=[
+ ['insertbefore', 'insertafter'], ['regexp', 'search_string'], ['backrefs', 'search_string']],
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ create = params['create']
+ backup = params['backup']
+ backrefs = params['backrefs']
+ path = params['path']
+ firstmatch = params['firstmatch']
+ regexp = params['regexp']
+ search_string = params['search_string']
+ line = params['line']
+
+ if '' in [regexp, search_string]:
+ msg = ("The %s is an empty string, which will match every line in the file. "
+ "This may have unintended consequences, such as replacing the last line in the file rather than appending.")
+ param_name = 'search string'
+ if regexp == '':
+ param_name = 'regular expression'
+ msg += " If this is desired, use '^' to match every line in the file and avoid this warning."
+ module.warn(msg % param_name)
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if os.path.isdir(b_path):
+ module.fail_json(rc=256, msg='Path %s is a directory!' % path)
+
+ if params['state'] == 'present':
+ if backrefs and regexp is None:
+ module.fail_json(msg='regexp is required with backrefs=true')
+
+ if line is None:
+ module.fail_json(msg='line is required with state=present')
+
+ # Deal with the insertafter default value manually, to avoid errors
+ # because of the mutually_exclusive mechanism.
+ ins_bef, ins_aft = params['insertbefore'], params['insertafter']
+ if ins_bef is None and ins_aft is None:
+ ins_aft = 'EOF'
+
+ present(module, path, regexp, search_string, line,
+ ins_aft, ins_bef, create, backup, backrefs, firstmatch)
+ else:
+ if (regexp, search_string, line) == (None, None, None):
+ module.fail_json(msg='one of line, search_string, or regexp is required with state=absent')
+
+ absent(module, path, regexp, search_string, line, backup)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/meta.py b/lib/ansible/modules/meta.py
new file mode 100644
index 0000000..1b062c9
--- /dev/null
+++ b/lib/ansible/modules/meta.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible, a Red Hat company
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: meta
+short_description: Execute Ansible 'actions'
+version_added: '1.2'
+description:
+ - Meta tasks are a special kind of task which can influence Ansible internal execution or state.
+ - Meta tasks can be used anywhere within your playbook.
+ - This module is also supported for Windows targets.
+options:
+ free_form:
+ description:
+ - This module takes a free form command, as a string. There is not an actual option named "free form". See the examples!
+ - C(flush_handlers) makes Ansible run any handler tasks which have thus far been notified. Ansible inserts these tasks internally at certain
+ points to implicitly trigger handler runs (after pre/post tasks, the final role execution, and the main tasks section of your plays).
+ - C(refresh_inventory) (added in Ansible 2.0) forces the reload of the inventory, which in the case of dynamic inventory scripts means they will be
+ re-executed. If the dynamic inventory script is using a cache, Ansible cannot know this and has no way of refreshing it (you can disable the cache
+ or, if available for your specific inventory datasource (e.g. aws), you can use an inventory plugin instead of an inventory script).
+ This is mainly useful when additional hosts are created and users wish to use them instead of using the M(ansible.builtin.add_host) module.
+ - C(noop) (added in Ansible 2.0) literally does 'nothing'. It is mainly used internally and is not recommended for general use.
+ - C(clear_facts) (added in Ansible 2.1) causes the gathered facts for the hosts specified in the play's list of hosts to be cleared,
+ including the fact cache.
+ - C(clear_host_errors) (added in Ansible 2.1) clears the failed state (if any) from hosts specified in the play's list of hosts.
+ - C(end_play) (added in Ansible 2.2) causes the play to end without failing the host(s). Note that this affects all hosts.
+ - C(reset_connection) (added in Ansible 2.3) interrupts a persistent connection (i.e. ssh + control persist).
+ - C(end_host) (added in Ansible 2.8) is a per-host variation of C(end_play). Causes the play to end for the current host without failing it.
+ - C(end_batch) (added in Ansible 2.12) causes the current batch (see C(serial)) to end without failing the host(s).
+ Note that with C(serial=0) or undefined this behaves the same as C(end_play).
+ choices: [ clear_facts, clear_host_errors, end_host, end_play, flush_handlers, noop, refresh_inventory, reset_connection, end_batch ]
+ required: true
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+attributes:
+ action:
+ support: none
+ bypass_host_loop:
+ details: Some of the subactions ignore the host loop; see the description above for each specific action for the exceptions
+ support: partial
+ bypass_task_loop:
+ details: Most of the subactions ignore the task loop; see the description above for each specific action for the exceptions
+ support: partial
+ check_mode:
+ details: While these actions don't modify the targets directly, they do change possible states of the target within the run
+ support: partial
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+ ignore_conditional:
+ details: Only some options support conditionals, and when they do they act 'bypassing the host loop', taking the values from the first available host
+ support: partial
+ connection:
+ details: Most options in this action do not use a connection, except C(reset_connection), which still does not connect to the remote
+ support: partial
+notes:
+ - C(clear_facts) will remove the persistent facts from M(ansible.builtin.set_fact) using C(cacheable=True),
+ but not the current host variable it creates for the current run.
+ - Skipping C(meta) tasks with tags is not supported before Ansible 2.11.
+seealso:
+- module: ansible.builtin.assert
+- module: ansible.builtin.fail
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = r'''
+# Example showing flushing handlers on demand, not at end of play
+- ansible.builtin.template:
+ src: new.j2
+ dest: /etc/config.txt
+ notify: myhandler
+
+- name: Force all notified handlers to run at this point, not waiting for normal sync points
+ ansible.builtin.meta: flush_handlers
+
+# Example showing how to refresh inventory during play
+- name: Reload inventory, useful with dynamic inventories when play makes changes to the existing hosts
+ cloud_guest: # this is a fake module
+ name: newhost
+ state: present
+
+- name: Refresh inventory to ensure new instances exist in inventory
+ ansible.builtin.meta: refresh_inventory
+
+# Example showing how to clear all existing facts of targeted hosts
+- name: Clear gathered facts from all currently targeted hosts
+ ansible.builtin.meta: clear_facts
+
+# Example showing how to continue using a failed target
+- name: Bring host back to play after failure
+ ansible.builtin.copy:
+ src: file
+ dest: /etc/file
+ remote_user: imightnothavepermission
+
+- ansible.builtin.meta: clear_host_errors
+
+# Example showing how to reset an existing connection
+- ansible.builtin.user:
+ name: '{{ ansible_user }}'
+ groups: input
+
+- name: Reset ssh connection to allow user changes to affect 'current login user'
+ ansible.builtin.meta: reset_connection
+
+# Example showing how to end the play for specific targets
+- name: End the play for hosts that run CentOS 6
+ ansible.builtin.meta: end_host
+ when:
+ - ansible_distribution == 'CentOS'
+ - ansible_distribution_major_version == '6'
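+
+# Example (sketch) showing how to end the current batch when using 'serial';
+# with serial=0 or undefined this behaves the same as end_play
+- name: End the current batch for hosts that run CentOS 6
+ ansible.builtin.meta: end_batch
+ when:
+ - ansible_distribution == 'CentOS'
+ - ansible_distribution_major_version == '6'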
+'''
diff --git a/lib/ansible/modules/package.py b/lib/ansible/modules/package.py
new file mode 100644
index 0000000..6078739
--- /dev/null
+++ b/lib/ansible/modules/package.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: package
+version_added: 2.0
+author:
+ - Ansible Core Team
+short_description: Generic OS package manager
+description:
+ - This module manages packages on a target without specifying a package manager module (like M(ansible.builtin.yum), M(ansible.builtin.apt), ...).
+ It is convenient to use in a heterogeneous environment of machines without having to create a specific task for
+ each package manager. C(package) calls the module for the package manager used by the operating system, as
+ discovered by the module M(ansible.builtin.setup). If C(setup) has not yet been run, C(package) will run it.
+ - This module acts as a proxy to the underlying package manager module. While all arguments will be passed to the
+ underlying module, not all modules support the same arguments. This documentation only covers the minimum intersection
+ of module arguments that all packaging modules support.
+ - For Windows targets, use the M(ansible.windows.win_package) module instead.
+options:
+ name:
+ description:
+ - Package name, or package specifier with version.
+ - Syntax varies with package manager. For example C(name-1.0) or C(name=1.0).
+ - Package names also vary with package manager; this module will not "translate" them per distro. For example C(libyaml-dev), C(libyaml-devel).
+ required: true
+ state:
+ description:
+ - Whether to install (C(present)), or remove (C(absent)) a package.
+ - You can use other states like C(latest) ONLY if they are supported by the underlying package module(s) executed.
+ required: true
+ use:
+ description:
+ - The required package manager module to use (C(yum), C(apt), and so on). The default 'auto' will use existing facts or try to autodetect it.
+ - You should only use this field if the automatic selection is not working for some reason.
+ default: auto
+requirements:
+ - Whatever is required by the package plugin specific to each system.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: full
+ bypass_host_loop:
+ support: none
+ check_mode:
+ details: support depends on the underlying plugin invoked
+ support: N/A
+ diff_mode:
+ details: support depends on the underlying plugin invoked
+ support: N/A
+ platform:
+ details: The support depends on the availability for the specific plugin for each platform and if fact gathering is able to detect it
+ platforms: all
+notes:
+ - While C(package) abstracts package managers to ease dealing with multiple distributions, package names often differ for the same software.
+
+'''
+EXAMPLES = '''
+- name: Install ntpdate
+ ansible.builtin.package:
+ name: ntpdate
+ state: present
+
+# This uses a variable as this changes per distribution.
+- name: Remove the apache package
+ ansible.builtin.package:
+ name: "{{ apache }}"
+ state: absent
+
+- name: Install the latest version of Apache and MariaDB
+ ansible.builtin.package:
+ name:
+ - httpd
+ - mariadb-server
+ state: latest
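+
+# Sketch showing the 'use' option to pin the package manager explicitly
+# when automatic detection is not desired
+- name: Install ntpdate using apt explicitly
+ ansible.builtin.package:
+ name: ntpdate
+ state: present
+ use: apt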
+'''
diff --git a/lib/ansible/modules/package_facts.py b/lib/ansible/modules/package_facts.py
new file mode 100644
index 0000000..57c1d3e
--- /dev/null
+++ b/lib/ansible/modules/package_facts.py
@@ -0,0 +1,552 @@
+# (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# most of it copied from AWX's scan_packages module
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: package_facts
+short_description: Package information as facts
+description:
+ - Return information about installed packages as facts.
+options:
+ manager:
+ description:
+ - The package manager used by the system so we can query the package information.
+ - Since 2.8 this is a list and can support multiple package managers per system.
+ - The 'portage' and 'pkg' options were added in version 2.8.
+ - The 'apk' option was added in version 2.11.
+ - The 'pkg_info' option was added in version 2.13.
+ default: ['auto']
+ choices: ['auto', 'rpm', 'apt', 'portage', 'pkg', 'pacman', 'apk', 'pkg_info']
+ type: list
+ elements: str
+ strategy:
+ description:
+ - This option controls how the module queries the package managers on the system.
+ C(first) means it will return only information for the first supported package manager available.
+ C(all) will return information for all supported and available package managers on the system.
+ choices: ['first', 'all']
+ default: 'first'
+ type: str
+ version_added: "2.8"
+version_added: "2.5"
+requirements:
+ - For 'portage' support it requires the C(qlist) utility, which is part of 'app-portage/portage-utils'.
+ - For Debian-based systems C(python-apt) package must be installed on targeted hosts.
+author:
+ - Matthew Jones (@matburt)
+ - Brian Coca (@bcoca)
+ - Adam Miller (@maxamillion)
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.facts
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ facts:
+ support: full
+ platform:
+ platforms: posix
+'''
+
+EXAMPLES = '''
+- name: Gather the package facts
+ ansible.builtin.package_facts:
+ manager: auto
+
+- name: Print the package facts
+ ansible.builtin.debug:
+ var: ansible_facts.packages
+
+- name: Check whether a package called foobar is installed
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.packages['foobar'] | length }} versions of foobar are installed!"
+ when: "'foobar' in ansible_facts.packages"
+
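+# Sketch showing how to query several supported package managers and keep results from all of them
+- name: Gather facts with every available supported package manager
+ ansible.builtin.package_facts:
+ manager: ['rpm', 'apt']
+ strategy: all
+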
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Facts to add to ansible_facts.
+ returned: always
+ type: complex
+ contains:
+ packages:
+ description:
+ - Maps the package name to a non-empty list of dicts with package information.
+ - Every dict in the list corresponds to one installed version of the package.
+ - The fields described below are present for all package managers. Depending on the
+ package manager, there might be more fields for a package.
+ returned: when an operating system level package manager is specified or auto-detected
+ type: dict
+ contains:
+ name:
+ description: The package's name.
+ returned: always
+ type: str
+ version:
+ description: The package's version.
+ returned: always
+ type: str
+ source:
+ description: Where information on the package came from.
+ returned: always
+ type: str
+ sample: |-
+ {
+ "packages": {
+ "kernel": [
+ {
+ "name": "kernel",
+ "source": "rpm",
+ "version": "3.10.0",
+ ...
+ },
+ {
+ "name": "kernel",
+ "source": "rpm",
+ "version": "3.10.0",
+ ...
+ },
+ ...
+ ],
+ "kernel-tools": [
+ {
+ "name": "kernel-tools",
+ "source": "rpm",
+ "version": "3.10.0",
+ ...
+ }
+ ],
+ ...
+ }
+ }
+ # Sample rpm
+ {
+ "packages": {
+ "kernel": [
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.26.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.16.1.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.10.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.21.1.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "693.2.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ }
+ ],
+ "kernel-tools": [
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel-tools",
+ "release": "693.2.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ }
+ ],
+ "kernel-tools-libs": [
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel-tools-libs",
+ "release": "693.2.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ }
+ ],
+ }
+ }
+ # Sample deb
+ {
+ "packages": {
+ "libbz2-1.0": [
+ {
+ "version": "1.0.6-5",
+ "source": "apt",
+ "arch": "amd64",
+ "name": "libbz2-1.0"
+ }
+ ],
+ "patch": [
+ {
+ "version": "2.7.1-4ubuntu1",
+ "source": "apt",
+ "arch": "amd64",
+ "name": "patch"
+ }
+ ],
+ }
+ }
+ # Sample pkg_info
+ {
+ "packages": {
+ "curl": [
+ {
+ "name": "curl",
+ "source": "pkg_info",
+ "version": "7.79.0"
+ }
+ ],
+ "intel-firmware": [
+ {
+ "name": "intel-firmware",
+ "source": "pkg_info",
+ "version": "20210608v0"
+ }
+ ],
+ }
+ }
+'''
+
+import re
+
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
+from ansible.module_utils.facts.packages import LibMgr, CLIMgr, get_all_pkg_managers
+
+
+class RPM(LibMgr):
+
+ LIB = 'rpm'
+
+ def list_installed(self):
+ return self._lib.TransactionSet().dbMatch()
+
+ def get_package_details(self, package):
+ return dict(name=package[self._lib.RPMTAG_NAME],
+ version=package[self._lib.RPMTAG_VERSION],
+ release=package[self._lib.RPMTAG_RELEASE],
+ epoch=package[self._lib.RPMTAG_EPOCH],
+ arch=package[self._lib.RPMTAG_ARCH],)
+
+ def is_available(self):
+ ''' we expect the python bindings to be installed, but warn if they are missing while the rpm CLI is present '''
+ we_have_lib = super(RPM, self).is_available()
+
+ try:
+ get_bin_path('rpm')
+
+ if not we_have_lib and not has_respawned():
+ # try to locate an interpreter with the necessary lib
+ interpreters = ['/usr/libexec/platform-python',
+ '/usr/bin/python3',
+ '/usr/bin/python2']
+ interpreter_path = probe_interpreters_for_module(interpreters, self.LIB)
+ if interpreter_path:
+ respawn_module(interpreter_path)
+ # end of the line for this process; this module will exit when the respawned copy completes
+
+ if not we_have_lib:
+ module.warn('Found "rpm" but %s' % (missing_required_lib(self.LIB)))
+ except ValueError:
+ pass
+
+ return we_have_lib
+
+
+class APT(LibMgr):
+
+ LIB = 'apt'
+
+ def __init__(self):
+ self._cache = None
+ super(APT, self).__init__()
+
+ @property
+ def pkg_cache(self):
+ if self._cache is not None:
+ return self._cache
+
+ self._cache = self._lib.Cache()
+ return self._cache
+
+ def is_available(self):
+ ''' we expect the python bindings to be installed, but warn about the missing bindings if apt/apt-get is present '''
+ we_have_lib = super(APT, self).is_available()
+ if not we_have_lib:
+ for exe in ('apt', 'apt-get', 'aptitude'):
+ try:
+ get_bin_path(exe)
+ except ValueError:
+ continue
+ else:
+ if not has_respawned():
+ # try to locate an interpreter with the necessary lib
+ interpreters = ['/usr/bin/python3',
+ '/usr/bin/python2']
+ interpreter_path = probe_interpreters_for_module(interpreters, self.LIB)
+ if interpreter_path:
+ respawn_module(interpreter_path)
+ # end of the line for this process; this module will exit here when respawned copy completes
+
+ module.warn('Found "%s" but %s' % (exe, missing_required_lib('apt')))
+ break
+
+ return we_have_lib
+
+ def list_installed(self):
+ # Store the cache to avoid running pkg_cache() for each item in the comprehension, which is very slow
+ cache = self.pkg_cache
+ return [pk for pk in cache.keys() if cache[pk].is_installed]
+
+ def get_package_details(self, package):
+ ac_pkg = self.pkg_cache[package].installed
+ return dict(name=package, version=ac_pkg.version, arch=ac_pkg.architecture, category=ac_pkg.section, origin=ac_pkg.origins[0].origin)
+
+
+class PACMAN(CLIMgr):
+
+ CLI = 'pacman'
+
+ def list_installed(self):
+ locale = get_best_parsable_locale(module)
+ rc, out, err = module.run_command([self._cli, '-Qi'], environ_update=dict(LC_ALL=locale))
+ if rc != 0 or err:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return out.split("\n\n")[:-1]
+
+ def get_package_details(self, package):
+ # parse values of details that might extend over several lines
+ raw_pkg_details = {}
+ last_detail = None
+ for line in package.splitlines():
+ m = re.match(r"([\w ]*[\w]) +: (.*)", line)
+ if m:
+ last_detail = m.group(1)
+ raw_pkg_details[last_detail] = m.group(2)
+ else:
+ # append value to previous detail
+ raw_pkg_details[last_detail] = raw_pkg_details[last_detail] + " " + line.lstrip()
+
+ provides = None
+ if raw_pkg_details['Provides'] != 'None':
+ provides = [
+ p.split('=')[0]
+ for p in raw_pkg_details['Provides'].split(' ')
+ ]
+
+ return {
+ 'name': raw_pkg_details['Name'],
+ 'version': raw_pkg_details['Version'],
+ 'arch': raw_pkg_details['Architecture'],
+ 'provides': provides,
+ }
+
+
+class PKG(CLIMgr):
+
+ CLI = 'pkg'
+ atoms = ['name', 'version', 'origin', 'installed', 'automatic', 'arch', 'category', 'prefix', 'vital']
+
+ def list_installed(self):
+ rc, out, err = module.run_command([self._cli, 'query', "%%%s" % '\t%'.join(['n', 'v', 'R', 't', 'a', 'q', 'o', 'p', 'V'])])
+ if rc != 0 or err:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return out.splitlines()
+
+ def get_package_details(self, package):
+
+ pkg = dict(zip(self.atoms, package.split('\t')))
+
+ if 'arch' in pkg:
+ try:
+ pkg['arch'] = pkg['arch'].split(':')[2]
+ except IndexError:
+ pass
+
+ if 'automatic' in pkg:
+ pkg['automatic'] = bool(int(pkg['automatic']))
+
+ if 'category' in pkg:
+ pkg['category'] = pkg['category'].split('/', 1)[0]
+
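+ # FreeBSD pkg versions look like '<version>[_<revision>][,<port_epoch>]';
+ # split the optional parts into their own fields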
+ if 'version' in pkg:
+ if ',' in pkg['version']:
+ pkg['version'], pkg['port_epoch'] = pkg['version'].split(',', 1)
+ else:
+ pkg['port_epoch'] = 0
+
+ if '_' in pkg['version']:
+ pkg['version'], pkg['revision'] = pkg['version'].split('_', 1)
+ else:
+ pkg['revision'] = '0'
+
+ if 'vital' in pkg:
+ pkg['vital'] = bool(int(pkg['vital']))
+
+ return pkg
+
+
+class PORTAGE(CLIMgr):
+
+ CLI = 'qlist'
+ atoms = ['category', 'name', 'version', 'ebuild_revision', 'slots', 'prefixes', 'sufixes']
+
+ def list_installed(self):
+ rc, out, err = module.run_command(' '.join([self._cli, '-Iv', '|', 'xargs', '-n', '1024', 'qatom']), use_unsafe_shell=True)
+ if rc != 0:
+ raise RuntimeError("Unable to list packages rc=%s : %s" % (rc, to_native(err)))
+ return out.splitlines()
+
+ def get_package_details(self, package):
+ return dict(zip(self.atoms, package.split()))
+
+
+class APK(CLIMgr):
+
+ CLI = 'apk'
+
+ def list_installed(self):
+ rc, out, err = module.run_command([self._cli, 'info', '-v'])
+ if rc != 0 or err:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return out.splitlines()
+
+ def get_package_details(self, package):
+ raw_pkg_details = {'name': package, 'version': '', 'release': ''}
+ nvr = package.rsplit('-', 2)
+ try:
+ return {
+ 'name': nvr[0],
+ 'version': nvr[1],
+ 'release': nvr[2],
+ }
+ except IndexError:
+ return raw_pkg_details
+
+
+class PKG_INFO(CLIMgr):
+
+ CLI = 'pkg_info'
+
+ def list_installed(self):
+ rc, out, err = module.run_command([self._cli, '-a'])
+ if rc != 0 or err:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return out.splitlines()
+
+ def get_package_details(self, package):
+ raw_pkg_details = {'name': package, 'version': ''}
+ details = package.split(maxsplit=1)[0].rsplit('-', maxsplit=1)
+
+ try:
+ return {
+ 'name': details[0],
+ 'version': details[1],
+ }
+ except IndexError:
+ return raw_pkg_details
+
+
+def main():
+
+ # get supported pkg managers
+ PKG_MANAGERS = get_all_pkg_managers()
+ PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]
+
+ # start work
+ global module
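+ # the package manager classes above rely on this module-level global
+ # for their run_command() and warn() calls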
+ module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'elements': 'str', 'default': ['auto']},
+ strategy={'choices': ['first', 'all'], 'default': 'first'}),
+ supports_check_mode=True)
+ packages = {}
+ results = {'ansible_facts': {}}
+ managers = [x.lower() for x in module.params['manager']]
+ strategy = module.params['strategy']
+
+ if 'auto' in managers:
+ # keep order from user, we do dedupe below
+ managers.extend(PKG_MANAGER_NAMES)
+ managers.remove('auto')
+
+ unsupported = set(managers).difference(PKG_MANAGER_NAMES)
+ if unsupported:
+ if 'auto' in module.params['manager']:
+ msg = 'Could not auto detect a usable package manager, check warnings for details.'
+ else:
+ msg = 'Unsupported package managers requested: %s' % (', '.join(unsupported))
+ module.fail_json(msg=msg)
+
+ found = 0
+ seen = set()
+ for pkgmgr in managers:
+
+ if found and strategy == 'first':
+ break
+
+ # dedupe as per above
+ if pkgmgr in seen:
+ continue
+ seen.add(pkgmgr)
+ try:
+ try:
+ # manager throws exception on init (calls self.test) if not usable.
+ manager = PKG_MANAGERS[pkgmgr]()
+ if manager.is_available():
+ found += 1
+ packages.update(manager.get_packages())
+
+ except Exception as e:
+ if pkgmgr in module.params['manager']:
+ module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
+ continue
+
+ except Exception as e:
+ if pkgmgr in module.params['manager']:
+ module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))
+
+ if found == 0:
+ msg = ('Could not detect a supported package manager from the following list: %s, '
+ 'or the required Python library is not installed. Check warnings for details.' % managers)
+ module.fail_json(msg=msg)
+
+ # Set the facts, this will override the facts in ansible_facts that might exist from previous runs
+ # when using operating system level or distribution package managers
+ results['ansible_facts']['packages'] = packages
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/pause.py b/lib/ansible/modules/pause.py
new file mode 100644
index 0000000..09061dd
--- /dev/null
+++ b/lib/ansible/modules/pause.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pause
+short_description: Pause playbook execution
+description:
+ - Pauses playbook execution for a set amount of time, or until a prompt is acknowledged.
+ All parameters are optional. The default behavior is to pause with a prompt.
+ - To pause/wait/sleep per host, use the M(ansible.builtin.wait_for) module.
+ - You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely.
+ To continue early press C(ctrl+c) and then C(c). To abort a playbook press C(ctrl+c) and then C(a).
+ - The pause module integrates into async/parallelized playbooks without any special considerations (see Rolling Updates).
+ When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts.
+ - This module is also supported for Windows targets.
+version_added: "0.8"
+options:
+ minutes:
+ description:
+ - A positive number of minutes to pause for.
+ seconds:
+ description:
+ - A positive number of seconds to pause for.
+ prompt:
+ description:
+ - Optional text to use for the prompt message.
+ echo:
+ description:
+ - Controls whether or not keyboard input is shown when typing.
+ - Has no effect if 'seconds' or 'minutes' is set.
+ type: bool
+ default: 'yes'
+ version_added: 2.5
+author: "Tim Bielawa (@tbielawa)"
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ become:
+ support: none
+ bypass_host_loop:
+ support: full
+ check_mode:
+ support: full
+ connection:
+ support: none
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+notes:
+ - Starting in 2.2, if you specify 0 or negative for minutes or seconds, it will wait for 1 second; previously it would wait indefinitely.
+ - User input is not captured or echoed, regardless of echo setting, when minutes or seconds is specified.
+'''
+
+EXAMPLES = '''
+- name: Pause for 5 minutes to build app cache
+ ansible.builtin.pause:
+ minutes: 5
+
+- name: Pause until you can verify updates to an application were successful
+ ansible.builtin.pause:
+
+- name: A helpful reminder of what to look out for post-update
+ ansible.builtin.pause:
+ prompt: "Make sure org.foo.FooOverload exception is not present"
+
+- name: Pause to get some sensitive input
+ ansible.builtin.pause:
+ prompt: "Enter a secret"
+ echo: no
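+
+# Sketch of a fixed-length pause; user input is not captured when seconds or minutes is set
+- name: Pause for 30 seconds between update batches
+ ansible.builtin.pause:
+ seconds: 30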
+'''
+
+RETURN = '''
+user_input:
+ description: User input from interactive console
+ returned: if no waiting time set
+ type: str
+ sample: Example user input
+start:
+ description: Time when started pausing
+ returned: always
+ type: str
+ sample: "2017-02-23 14:35:07.298862"
+stop:
+ description: Time when ended pausing
+ returned: always
+ type: str
+ sample: "2017-02-23 14:35:09.552594"
+delta:
+ description: Time paused in seconds
+ returned: always
+ type: str
+ sample: 2
+stdout:
+ description: Output of pause module
+ returned: always
+ type: str
+ sample: Paused for 0.04 minutes
+echo:
+ description: Value of echo setting
+ returned: always
+ type: bool
+ sample: true
+'''
diff --git a/lib/ansible/modules/ping.py b/lib/ansible/modules/ping.py
new file mode 100644
index 0000000..f6267a8
--- /dev/null
+++ b/lib/ansible/modules/ping.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ping
+version_added: historical
+short_description: Try to connect to host, verify a usable python and return C(pong) on success
+description:
+ - A trivial test module; this module always returns C(pong) on successful
+ contact. It does not make sense in playbooks, but it is useful from
+ C(/usr/bin/ansible) to verify the ability to log in and that a usable Python is configured.
+ - This is NOT ICMP ping; this is just a trivial test module that requires Python on the remote node.
+ - For Windows targets, use the M(ansible.windows.win_ping) module instead.
+ - For Network targets, use the M(ansible.netcommon.net_ping) module instead.
+options:
+ data:
+ description:
+ - Data to return for the C(ping) return value.
+ - If this parameter is set to C(crash), the module will cause an exception.
+ type: str
+ default: pong
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+ - module: ansible.netcommon.net_ping
+ - module: ansible.windows.win_ping
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = '''
+# Test that we can log on to 'webservers' and execute Python with the json library.
+# ansible webservers -m ansible.builtin.ping
+
+- name: Example from an Ansible Playbook
+ ansible.builtin.ping:
+
+- name: Induce an exception to see what happens
+ ansible.builtin.ping:
+ data: crash
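+
+# Sketch showing that the 'ping' return field echoes the 'data' parameter
+- name: Return a custom value instead of the default pong
+ ansible.builtin.ping:
+ data: hello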
+'''
+
+RETURN = '''
+ping:
+ description: Value provided with the data parameter.
+ returned: success
+ type: str
+ sample: pong
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(type='str', default='pong'),
+ ),
+ supports_check_mode=True
+ )
+
+ if module.params['data'] == 'crash':
+ raise Exception("boom")
+
+ result = dict(
+ ping=module.params['data'],
+ )
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/pip.py b/lib/ansible/modules/pip.py
new file mode 100644
index 0000000..a9930cc
--- /dev/null
+++ b/lib/ansible/modules/pip.py
@@ -0,0 +1,832 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pip
+short_description: Manages Python library dependencies
+description:
+ - "Manage Python library dependencies. To use this module, one of the following keys is required: C(name)
+ or C(requirements)."
+version_added: "0.7"
+options:
+ name:
+ description:
+ - The name of a Python library to install or the url(bzr+,hg+,git+,svn+) of the remote package.
+ - This can be a list (since 2.2) and contain version specifiers (since 2.7).
+ type: list
+ elements: str
+ version:
+ description:
+ - The version number to install for the Python library specified in the I(name) parameter.
+ type: str
+ requirements:
+ description:
+ - The path to a pip requirements file, which should be local to the remote system.
+ File can be specified as a relative path if using the chdir option.
+ type: str
+ virtualenv:
+ description:
+ - An optional path to a I(virtualenv) directory to install into.
+ It cannot be specified together with the 'executable' parameter
+ (added in 2.1).
+ If the virtualenv does not exist, it will be created before installing
+ packages. The optional virtualenv_site_packages, virtualenv_command,
+ and virtualenv_python options affect the creation of the virtualenv.
+ type: path
+ virtualenv_site_packages:
+ description:
+ - Whether the virtual environment will inherit packages from the
+ global site-packages directory. Note that if this setting is
+ changed on an already existing virtual environment it will not
+ have any effect, the environment must be deleted and newly
+ created.
+ type: bool
+ default: "no"
+ version_added: "1.0"
+ virtualenv_command:
+ description:
+ - The command or a pathname to the command to create the virtual
+ environment with. For example C(pyvenv), C(virtualenv),
+ C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).
+ type: path
+ default: virtualenv
+ version_added: "1.1"
+ virtualenv_python:
+ description:
+ - The Python executable used for creating the virtual environment.
+ For example C(python3.5), C(python2.7). When not specified, the
+ Python version used to run the ansible module is used. This parameter
+ should not be used when C(virtualenv_command) is using C(pyvenv) or
+ the C(-m venv) module.
+ type: str
+ version_added: "2.0"
+ state:
+ description:
+ - The state of the module.
+ - The 'forcereinstall' option is only available in Ansible 2.1 and above.
+ type: str
+ choices: [ absent, forcereinstall, latest, present ]
+ default: present
+ extra_args:
+ description:
+ - Extra arguments passed to pip.
+ type: str
+ version_added: "1.0"
+ editable:
+ description:
+ - Pass the editable flag.
+ type: bool
+ default: 'no'
+ version_added: "2.0"
+ chdir:
+ description:
+ - cd into this directory before running the command.
+ type: path
+ version_added: "1.3"
+ executable:
+ description:
+ - The explicit executable or pathname for the pip executable,
+ if different from the Ansible Python interpreter. For
+ example C(pip3.3), if there are both Python 2.7 and 3.3 installations
+ in the system and you want to run pip for the Python 3.3 installation.
+ - Mutually exclusive with I(virtualenv) (added in 2.1).
+ - Does not affect the Ansible Python interpreter.
+ - The setuptools package must be installed for both the Ansible Python interpreter
+ and for the version of Python specified by this option.
+ type: path
+ version_added: "1.3"
+ umask:
+ description:
+ - The system umask to apply before installing the pip package. This is
+ useful, for example, when installing on systems that have a very
+ restrictive umask by default (e.g., "0077") and you want to pip install
+ packages which are to be used by all users. Note that this requires you
+ to specify the desired umask mode as an octal string (e.g., "0022").
+ type: str
+ version_added: "2.1"
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - The virtualenv (U(http://www.virtualenv.org/)) must be
+ installed on the remote host if the virtualenv parameter is specified and
+ the virtualenv needs to be created.
+ - Although it executes using the Ansible Python interpreter, the pip module shells out to
+ run the actual pip command, so it can use any pip version you specify with I(executable).
+ By default, it uses the pip version for the Ansible Python interpreter. For example, pip3 on python 3, and pip2 or pip on python 2.
+ - The interpreter used by Ansible
+ (see R(ansible_python_interpreter, ansible_python_interpreter))
+ requires the setuptools package, regardless of the version of pip set with
+ the I(executable) option.
+requirements:
+- pip
+- virtualenv
+- setuptools
+author:
+- Matt Wright (@mattupstate)
+'''
+
+EXAMPLES = '''
+- name: Install bottle python package
+ ansible.builtin.pip:
+ name: bottle
+
+- name: Install bottle python package on version 0.11
+ ansible.builtin.pip:
+ name: bottle==0.11
+
+- name: Install bottle python package with version specifiers
+ ansible.builtin.pip:
+ name: bottle>0.10,<0.20,!=0.11
+
+- name: Install multi python packages with version specifiers
+ ansible.builtin.pip:
+ name:
+ - django>1.11.0,<1.12.0
+ - bottle>0.10,<0.20,!=0.11
+
+- name: Install python package using a proxy
+ ansible.builtin.pip:
+ name: six
+ environment:
+ http_proxy: 'http://127.0.0.1:8080'
+ https_proxy: 'https://127.0.0.1:8080'
+
+# You do not have to supply '-e' option in extra_args
+- name: Install MyApp using one of the remote protocols (bzr+,hg+,git+,svn+)
+ ansible.builtin.pip:
+ name: svn+http://myrepo/svn/MyApp#egg=MyApp
+
+- name: Install MyApp using one of the remote protocols (bzr+,hg+,git+)
+ ansible.builtin.pip:
+ name: git+http://myrepo/app/MyApp
+
+- name: Install MyApp from local tarball
+ ansible.builtin.pip:
+ name: file:///path/to/MyApp.tar.gz
+
+- name: Install bottle into the specified (virtualenv), inheriting none of the globally installed modules
+ ansible.builtin.pip:
+ name: bottle
+ virtualenv: /my_app/venv
+
+- name: Install bottle into the specified (virtualenv), inheriting globally installed modules
+ ansible.builtin.pip:
+ name: bottle
+ virtualenv: /my_app/venv
+ virtualenv_site_packages: yes
+
+- name: Install bottle into the specified (virtualenv), using Python 2.7
+ ansible.builtin.pip:
+ name: bottle
+ virtualenv: /my_app/venv
+ virtualenv_command: virtualenv-2.7
+
+- name: Install bottle within a user home directory
+ ansible.builtin.pip:
+ name: bottle
+ extra_args: --user
+
+- name: Install specified python requirements
+ ansible.builtin.pip:
+ requirements: /my_app/requirements.txt
+
+- name: Install specified python requirements in indicated (virtualenv)
+ ansible.builtin.pip:
+ requirements: /my_app/requirements.txt
+ virtualenv: /my_app/venv
+
+- name: Install specified python requirements and custom Index URL
+ ansible.builtin.pip:
+ requirements: /my_app/requirements.txt
+ extra_args: -i https://example.com/pypi/simple
+
+- name: Install specified python requirements offline from a local directory with downloaded packages
+ ansible.builtin.pip:
+ requirements: /my_app/requirements.txt
+ extra_args: "--no-index --find-links=file:///my_downloaded_packages_dir"
+
+- name: Install bottle for Python 3.3 specifically, using the 'pip3.3' executable
+ ansible.builtin.pip:
+ name: bottle
+ executable: pip3.3
+
+- name: Install bottle, forcing reinstallation if it's already installed
+ ansible.builtin.pip:
+ name: bottle
+ state: forcereinstall
+
+- name: Install bottle while ensuring the umask is 0022 (to ensure other users can use it)
+ ansible.builtin.pip:
+ name: bottle
+ umask: "0022"
+ become: True
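+
+# Sketch of an editable install; the repository URL is illustrative
+- name: Install MyApp from a Git repository in editable mode
+ ansible.builtin.pip:
+ name: git+http://myrepo/app/MyApp#egg=MyApp
+ editable: true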
+'''
+
+RETURN = '''
+cmd:
+ description: pip command used by the module
+ returned: success
+ type: str
+ sample: pip2 install ansible six
+name:
+ description: list of python modules targeted by pip
+ returned: success
+ type: list
+ sample: ['ansible', 'six']
+requirements:
+ description: Path to the requirements file
+ returned: success, if a requirements file was provided
+ type: str
+ sample: "/srv/git/project/requirements.txt"
+version:
+ description: Version of the package specified in 'name'
+ returned: success, if a name and version were provided
+ type: str
+ sample: "2.5.1"
+virtualenv:
+ description: Path to the virtualenv
+ returned: success, if a virtualenv path was provided
+ type: str
+ sample: "/tmp/virtualenv"
+'''
+
+import os
+import re
+import sys
+import tempfile
+import operator
+import shlex
+import traceback
+import types
+
+from ansible.module_utils.compat.version import LooseVersion
+
+SETUPTOOLS_IMP_ERR = None
+try:
+ from pkg_resources import Requirement
+
+ HAS_SETUPTOOLS = True
+except ImportError:
+ HAS_SETUPTOOLS = False
+ SETUPTOOLS_IMP_ERR = traceback.format_exc()
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule, is_executable, missing_required_lib
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.six import PY3
+
+
+#: Python one-liners to be run at the command line that will determine the
+# installed version for these special libraries. These are libraries that
+# don't end up in the output of pip freeze.
+_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)',
+ 'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'}
+
+_VCS_RE = re.compile(r'(svn|git|hg|bzr)\+')
+
+op_dict = {">=": operator.ge, "<=": operator.le, ">": operator.gt,
+ "<": operator.lt, "==": operator.eq, "!=": operator.ne, "~=": operator.ge}
+
+
+def _is_vcs_url(name):
+ """Test whether a name is a vcs url or not."""
+ return re.match(_VCS_RE, name)
+
+
+def _is_package_name(name):
+ """Test whether the name is a package name or a version specifier."""
+ return not name.lstrip().startswith(tuple(op_dict.keys()))
+
+
+def _recover_package_name(names):
+ """Recover package names as list from user's raw input.
+
+ :input: a mixed and invalid list of names or version specifiers
+ :return: a list of valid package names
+
+ eg.
+ input: ['django>1.11.1', '<1.11.3', 'ipaddress', 'simpleproject>1.1.0', '<2.0.0']
+ return: ['django>1.11.1,<1.11.3', 'ipaddress', 'simpleproject>1.1.0,<2.0.0']
+
+ input: ['django>1.11.1,<1.11.3,ipaddress', 'simpleproject>1.1.0,<2.0.0']
+ return: ['django>1.11.1,<1.11.3', 'ipaddress', 'simpleproject>1.1.0,<2.0.0']
+ """
+ # rebuild the input names into a flat list so we can tolerate any combination of input
+ tmp = []
+ for one_line in names:
+ tmp.extend(one_line.split(","))
+ names = tmp
+
+ # reconstruct the names
+ name_parts = []
+ package_names = []
+ in_brackets = False
+ for name in names:
+ if _is_package_name(name) and not in_brackets:
+ if name_parts:
+ package_names.append(",".join(name_parts))
+ name_parts = []
+ if "[" in name:
+ in_brackets = True
+ if in_brackets and "]" in name:
+ in_brackets = False
+ name_parts.append(name)
+ package_names.append(",".join(name_parts))
+ return package_names
+
+
+def _get_cmd_options(module, cmd):
+ thiscmd = cmd + " --help"
+ rc, stdout, stderr = module.run_command(thiscmd)
+ if rc != 0:
+ module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr))
+
+ words = stdout.strip().split()
+ cmd_options = [x for x in words if x.startswith('--')]
+ return cmd_options
+
+
+def _get_packages(module, pip, chdir):
+ '''Return results of pip command to get packages.'''
+ # Try 'pip list' command first.
+ command = pip + ['list', '--format=freeze']
+ locale = get_best_parsable_locale(module)
+ lang_env = {'LANG': locale, 'LC_ALL': locale, 'LC_MESSAGES': locale}
+ rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
+
+ # If there was an error (pip version too old) then use 'pip freeze'.
+ if rc != 0:
+ command = pip + ['freeze']
+ rc, out, err = module.run_command(command, cwd=chdir)
+ if rc != 0:
+ _fail(module, command, out, err)
+
+ return ' '.join(command), out, err
+
+
+def _is_present(module, req, installed_pkgs, pkg_command):
+ '''Return whether or not package is installed.'''
+ for pkg in installed_pkgs:
+ if '==' in pkg:
+ pkg_name, pkg_version = pkg.split('==')
+ pkg_name = Package.canonicalize_name(pkg_name)
+ else:
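+ # no '==' in the entry (e.g. editable or VCS installs in freeze
+ # output), so there is no version to compare; skip it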
+ continue
+
+ if pkg_name == req.package_name and req.is_satisfied_by(pkg_version):
+ return True
+
+ return False
+
+
+def _get_pip(module, env=None, executable=None):
+ # Older pip only installed under the "/usr/bin/pip" name. Many Linux
+ # distros install it there.
+ # By default, we try to use pip required for the current python
+ # interpreter, so people can use pip to install modules dependencies
+ candidate_pip_basenames = ('pip2', 'pip')
+ if PY3:
+ # pip under python3 installs the "/usr/bin/pip3" name
+ candidate_pip_basenames = ('pip3',)
+
+ pip = None
+ if executable is not None:
+ if os.path.isabs(executable):
+ pip = executable
+ else:
+ # If you define your own executable that executable should be the only candidate.
+ # As noted in the docs, executable doesn't work with virtualenvs.
+ candidate_pip_basenames = (executable,)
+ elif executable is None and env is None and _have_pip_module():
+ # If no executable or virtualenv were specified, use the pip module for the current Python interpreter if available.
+ # Use of `__main__` is required to support Python 2.6 since support for executing packages with `runpy` was added in Python 2.7.
+ # Without it Python 2.6 gives the following error: pip is a package and cannot be directly executed
+ pip = [sys.executable, '-m', 'pip.__main__']
+
+ if pip is None:
+ if env is None:
+ opt_dirs = []
+ for basename in candidate_pip_basenames:
+ pip = module.get_bin_path(basename, False, opt_dirs)
+ if pip is not None:
+ break
+ else:
+ # For-else: Means that we did not break out of the loop
+ # (therefore, that pip was not found)
+ module.fail_json(msg='Unable to find any of %s to use. pip'
+ ' needs to be installed.' % ', '.join(candidate_pip_basenames))
+ else:
+ # If we're using a virtualenv we must use the pip from the
+ # virtualenv
+ venv_dir = os.path.join(env, 'bin')
+ candidate_pip_basenames = (candidate_pip_basenames[0], 'pip')
+ for basename in candidate_pip_basenames:
+ candidate = os.path.join(venv_dir, basename)
+ if os.path.exists(candidate) and is_executable(candidate):
+ pip = candidate
+ break
+ else:
+ # For-else: Means that we did not break out of the loop
+ # (therefore, that pip was not found)
+ module.fail_json(msg='Unable to find pip in the virtualenv, %s, ' % env +
+ 'under any of these names: %s. ' % (', '.join(candidate_pip_basenames)) +
+ 'Make sure pip is present in the virtualenv.')
+
+ if not isinstance(pip, list):
+ pip = [pip]
+
+ return pip
+
+
+def _have_pip_module(): # type: () -> bool
+ """Return True if the `pip` module can be found using the current Python interpreter, otherwise return False."""
+ try:
+ from importlib.util import find_spec
+ except ImportError:
+ find_spec = None # type: ignore[assignment] # type: ignore[no-redef]
+
+ if find_spec:
+ # noinspection PyBroadException
+ try:
+ # noinspection PyUnresolvedReferences
+ found = bool(find_spec('pip'))
+ except Exception:
+ found = False
+ else:
+ # noinspection PyDeprecation
+ import imp
+
+ # noinspection PyBroadException
+ try:
+ # noinspection PyDeprecation
+ imp.find_module('pip')
+ except Exception:
+ found = False
+ else:
+ found = True
+
+ return found
+
+
+def _fail(module, cmd, out, err):
+ msg = ''
+ if out:
+ msg += "stdout: %s" % (out, )
+ if err:
+ msg += "\n:stderr: %s" % (err, )
+ module.fail_json(cmd=cmd, msg=msg)
+
+
+def _get_package_info(module, package, env=None):
+ """This is only needed for special packages which do not show up in pip freeze
+
+ pip and setuptools fall into this category.
+
+ :returns: a string containing the version number if the package is
+ installed. None if the package is not installed.
+ """
+ if env:
+ opt_dirs = ['%s/bin' % env]
+ else:
+ opt_dirs = []
+ python_bin = module.get_bin_path('python', False, opt_dirs)
+
+ if python_bin is None:
+ formatted_dep = None
+ else:
+ rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]])
+ if rc:
+ formatted_dep = None
+ else:
+ formatted_dep = '%s==%s' % (package, out.strip())
+ return formatted_dep
+
+
+def setup_virtualenv(module, env, chdir, out, err):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ cmd = shlex.split(module.params['virtualenv_command'])
+
+ # Find the binary for the command in the PATH
+ # and switch the command for the explicit path.
+ if os.path.basename(cmd[0]) == cmd[0]:
+ cmd[0] = module.get_bin_path(cmd[0], True)
+
+ # Add the system-site-packages option if that
+ # is enabled, otherwise explicitly set the option
+ # to not use system-site-packages if that is an
+ # option provided by the command's help function.
+ if module.params['virtualenv_site_packages']:
+ cmd.append('--system-site-packages')
+ else:
+ cmd_opts = _get_cmd_options(module, cmd[0])
+ if '--no-site-packages' in cmd_opts:
+ cmd.append('--no-site-packages')
+
+ virtualenv_python = module.params['virtualenv_python']
+ # -p is a virtualenv option, not compatible with pyenv or venv
+ # this conditional validates if the command being used is not any of them
+ if not any(ex in module.params['virtualenv_command'] for ex in ('pyvenv', '-m venv')):
+ if virtualenv_python:
+ cmd.append('-p%s' % virtualenv_python)
+ elif PY3:
+ # Ubuntu currently has a patch making virtualenv always
+ # try to use python2. Since Ubuntu16 works without
+ # python2 installed, this is a problem. This code mimics
+ # the upstream behaviour of using the python which invoked
+ # virtualenv to determine which python is used inside of
+ # the virtualenv (when none are specified).
+ cmd.append('-p%s' % sys.executable)
+
+ # venv and pyvenv do not support -p; fail instead of silently
+ # ignoring virtualenv_python
+ elif module.params['virtualenv_python']:
+ module.fail_json(
+ msg='virtualenv_python should not be used when'
+ ' using the venv module or pyvenv as virtualenv_command'
+ )
+
+ cmd.append(env)
+ rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
+ out += out_venv
+ err += err_venv
+ if rc != 0:
+ _fail(module, cmd, out, err)
+ return out, err
+
+
+class Package:
+ """Python distribution package metadata wrapper.
+
+ A wrapper class for Requirement, which provides
+ API to parse package name, version specifier,
+ test whether a package is already satisfied.
+ """
+
+ _CANONICALIZE_RE = re.compile(r'[-_.]+')
+
+ def __init__(self, name_string, version_string=None):
+ self._plain_package = False
+ self.package_name = name_string
+ self._requirement = None
+
+ if version_string:
+ version_string = version_string.lstrip()
+ separator = '==' if version_string[0].isdigit() else ' '
+ name_string = separator.join((name_string, version_string))
+ try:
+ self._requirement = Requirement.parse(name_string)
+ # old pkg_resources will replace 'setuptools' with 'distribute' when it's already installed
+ if self._requirement.project_name == "distribute" and "setuptools" in name_string:
+ self.package_name = "setuptools"
+ self._requirement.project_name = "setuptools"
+ else:
+ self.package_name = Package.canonicalize_name(self._requirement.project_name)
+ self._plain_package = True
+ except ValueError:
+ pass
+
+ @property
+ def has_version_specifier(self):
+ if self._plain_package:
+ return bool(self._requirement.specs)
+ return False
+
+ def is_satisfied_by(self, version_to_test):
+ if not self._plain_package:
+ return False
+ try:
+ return self._requirement.specifier.contains(version_to_test, prereleases=True)
+ except AttributeError:
+ # old setuptools has no specifier, do fallback
+ version_to_test = LooseVersion(version_to_test)
+ return all(
+ op_dict[op](version_to_test, LooseVersion(ver))
+ for op, ver in self._requirement.specs
+ )
+
+ @staticmethod
+ def canonicalize_name(name):
+ # This is taken from PEP 503.
+ return Package._CANONICALIZE_RE.sub("-", name).lower()
+
+ def __str__(self):
+ if self._plain_package:
+ return to_native(self._requirement)
+ return self.package_name
+
+
+def main():
+ state_map = dict(
+ present=['install'],
+ absent=['uninstall', '-y'],
+ latest=['install', '-U'],
+ forcereinstall=['install', '-U', '--force-reinstall'],
+ )
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=list(state_map.keys())),
+ name=dict(type='list', elements='str'),
+ version=dict(type='str'),
+ requirements=dict(type='str'),
+ virtualenv=dict(type='path'),
+ virtualenv_site_packages=dict(type='bool', default=False),
+ virtualenv_command=dict(type='path', default='virtualenv'),
+ virtualenv_python=dict(type='str'),
+ extra_args=dict(type='str'),
+ editable=dict(type='bool', default=False),
+ chdir=dict(type='path'),
+ executable=dict(type='path'),
+ umask=dict(type='str'),
+ ),
+ required_one_of=[['name', 'requirements']],
+ mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']],
+ supports_check_mode=True,
+ )
+
+ if not HAS_SETUPTOOLS:
+ module.fail_json(msg=missing_required_lib("setuptools"),
+ exception=SETUPTOOLS_IMP_ERR)
+
+ state = module.params['state']
+ name = module.params['name']
+ version = module.params['version']
+ requirements = module.params['requirements']
+ extra_args = module.params['extra_args']
+ chdir = module.params['chdir']
+ umask = module.params['umask']
+ env = module.params['virtualenv']
+
+ venv_created = False
+ if env and chdir:
+ env = os.path.join(chdir, env)
+
+ if umask and not isinstance(umask, int):
+ try:
+ umask = int(umask, 8)
+ except Exception:
+ module.fail_json(msg="umask must be an octal integer",
+ details=to_native(sys.exc_info()[1]))
+
+ old_umask = None
+ if umask is not None:
+ old_umask = os.umask(umask)
+ try:
+ if state == 'latest' and version is not None:
+ module.fail_json(msg='version is incompatible with state=latest')
+
+ if chdir is None:
+ # this is done to avoid permissions issues with privilege escalation and virtualenvs
+ chdir = tempfile.gettempdir()
+
+ err = ''
+ out = ''
+
+ if env:
+ if not os.path.exists(os.path.join(env, 'bin', 'activate')):
+ venv_created = True
+ out, err = setup_virtualenv(module, env, chdir, out, err)
+
+ pip = _get_pip(module, env, module.params['executable'])
+
+ cmd = pip + state_map[state]
+
+ # If there's a virtualenv we want things we install to be able to use other
+ # installations that exist as binaries within this virtualenv. Example: we
+ # install cython and then gevent -- gevent needs to use the cython binary,
+ # not just a python package that will be found by calling the right python.
+ # So if there's a virtualenv, we add that bin/ to the beginning of the PATH
+ # in run_command by setting path_prefix here.
+ path_prefix = None
+ if env:
+ path_prefix = os.path.join(env, 'bin')
+
+ # Automatically apply -e option to extra_args when source is a VCS url. VCS
+ # includes those beginning with svn+, git+, hg+ or bzr+
+ has_vcs = False
+ if name:
+ for pkg in name:
+ if pkg and _is_vcs_url(pkg):
+ has_vcs = True
+ break
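+ # Illustrative VCS-style requirement strings (not from the module):
+ # 'git+https://github.com/ansible/ansible.git#egg=ansible'
+ # 'hg+https://example.org/repo#egg=pkg'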
+
+ # convert raw input package names to Package instances
+ packages = [Package(pkg) for pkg in _recover_package_name(name)]
+ # check invalid combination of arguments
+ if version is not None:
+ if len(packages) > 1:
+ module.fail_json(
+ msg="'version' argument is ambiguous when installing multiple package distributions. "
+ "Please specify version restrictions next to each package in 'name' argument."
+ )
+ if packages[0].has_version_specifier:
+ module.fail_json(
+ msg="The 'version' argument conflicts with any version specifier provided along with a package name. "
+ "Please keep the version specifier, but remove the 'version' argument."
+ )
+ # if a version was provided via the 'version' argument, fold it into the package
+ packages[0] = Package(to_native(packages[0]), version)
+
+ if module.params['editable']:
+ args_list = [] # used if extra_args is not used at all
+ if extra_args:
+ args_list = extra_args.split(' ')
+ if '-e' not in args_list:
+ args_list.append('-e')
+ # Ok, we will reconstruct the option string
+ extra_args = ' '.join(args_list)
+
+ if extra_args:
+ cmd.extend(shlex.split(extra_args))
+
+ if name:
+ cmd.extend(to_native(p) for p in packages)
+ elif requirements:
+ cmd.extend(['-r', requirements])
+ else:
+ module.exit_json(
+ changed=False,
+ warnings=["No valid name or requirements file found."],
+ )
+
+ if module.check_mode:
+ if extra_args or requirements or state == 'latest' or not name:
+ module.exit_json(changed=True)
+
+ pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir)
+
+ out += out_pip
+ err += err_pip
+
+ changed = False
+ if name:
+ pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p]
+
+ if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name):
+ # Older versions of pip (pre-1.3) do not have 'pip list'.
+ # 'pip freeze' does not list setuptools or pip in its output,
+ # so we need to get those via a special case.
+ for pkg in ('setuptools', 'pip'):
+ if pkg in name:
+ formatted_dep = _get_package_info(module, pkg, env)
+ if formatted_dep is not None:
+ pkg_list.append(formatted_dep)
+ out += '%s\n' % formatted_dep
+
+ for package in packages:
+ is_present = _is_present(module, package, pkg_list, pkg_cmd)
+ if (state == 'present' and not is_present) or (state == 'absent' and is_present):
+ changed = True
+ break
+ module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)
+
+ out_freeze_before = None
+ if requirements or has_vcs:
+ _, out_freeze_before, _ = _get_packages(module, pip, chdir)
+
+ rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
+ out += out_pip
+ err += err_pip
+ if rc == 1 and state == 'absent' and \
+ ('not installed' in out_pip or 'not installed' in err_pip):
+ pass # rc is 1 when attempting to uninstall non-installed package
+ elif rc != 0:
+ _fail(module, cmd, out, err)
+
+ if state == 'absent':
+ changed = 'Successfully uninstalled' in out_pip
+ else:
+ if out_freeze_before is None:
+ changed = 'Successfully installed' in out_pip
+ else:
+ _, out_freeze_after, _ = _get_packages(module, pip, chdir)
+ changed = out_freeze_before != out_freeze_after
+
+ changed = changed or venv_created
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
+ state=state, requirements=requirements, virtualenv=env,
+ stdout=out, stderr=err)
+ finally:
+ if old_umask is not None:
+ os.umask(old_umask)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/raw.py b/lib/ansible/modules/raw.py
new file mode 100644
index 0000000..dc40a73
--- /dev/null
+++ b/lib/ansible/modules/raw.py
@@ -0,0 +1,88 @@
+# This is a virtual module that is entirely implemented server side
+
+# Copyright: (c) 2012, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: raw
+short_description: Executes a low-down and dirty command
+version_added: historical
+options:
+ free_form:
+ description:
+ - The raw module takes a free form command to run.
+ - There is no parameter actually named 'free form'; see the examples!
+ required: true
+ executable:
+ description:
+ - Change the shell used to execute the command. Should be an absolute path to the executable.
+ - When using privilege escalation (C(become)) a default shell will be assigned if one is not provided
+ as privilege escalation requires a shell.
+ version_added: "1.0"
+description:
+ - Executes a low-down and dirty SSH command, not going through the module
+ subsystem.
+ - This is useful in only a few cases and should otherwise be avoided. A common
+ case is installing C(python) on a system without python installed by default.
+ Another is speaking to devices such as
+ routers that do not have any Python installed. In any other case, using
+ the M(ansible.builtin.shell) or M(ansible.builtin.command) module is much more appropriate.
+ - Arguments given to C(raw) are run directly through the configured remote shell.
+ - Standard output, error output and return code are returned when
+ available.
+ - There is no change handler support for this module.
+ - This module does not require python on the remote system, much like
+ the M(ansible.builtin.script) module.
+ - This module is also supported for Windows targets.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.raw
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ details: This action is one of the few that requires no Python on the remote as it passes the command directly into the connection string
+ platforms: all
+ raw:
+ support: full
+notes:
+ - "If using raw from a playbook, you may need to disable fact gathering
+ using C(gather_facts: no) if you're using C(raw) to bootstrap python
+ onto the machine."
+ - If you want to execute a command securely and predictably, it may be
+ better to use the M(ansible.builtin.command) or M(ansible.builtin.shell) modules instead.
+ - The C(environment) keyword does not normally work with raw; it requires a shell,
+ which means it only works if C(executable) is set or the module is used
+ with privilege escalation (C(become)).
+seealso:
+- module: ansible.builtin.command
+- module: ansible.builtin.shell
+- module: ansible.windows.win_command
+- module: ansible.windows.win_shell
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Bootstrap a host without python2 installed
+ ansible.builtin.raw: dnf install -y python2 python2-dnf libselinux-python
+
+- name: Run a command that uses non-POSIX shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
+ ansible.builtin.raw: cat < /tmp/*txt
+ args:
+ executable: /bin/bash
+
+- name: Safely use templated variables. Always use quote filter to avoid injection issues.
+ ansible.builtin.raw: "{{ package_mgr|quote }} {{ pkg_flags|quote }} install {{ python|quote }}"
+
+- name: List user accounts on a Windows system
+ ansible.builtin.raw: Get-WmiObject -Class Win32_UserAccount
+'''
diff --git a/lib/ansible/modules/reboot.py b/lib/ansible/modules/reboot.py
new file mode 100644
index 0000000..71e6294
--- /dev/null
+++ b/lib/ansible/modules/reboot.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: reboot
+short_description: Reboot a machine
+notes:
+ - C(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use C(search_paths)
+ to specify locations to search if the default paths do not work.
+description:
+ - Reboot a machine, wait for it to go down, come back up, and respond to commands.
+ - For Windows targets, use the M(ansible.windows.win_reboot) module instead.
+version_added: "2.7"
+options:
+ pre_reboot_delay:
+ description:
+ - Seconds to wait before reboot. Passed as a parameter to the reboot command.
+ - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it will be set to 0.
+ - On Solaris and FreeBSD, this will be seconds.
+ type: int
+ default: 0
+ post_reboot_delay:
+ description:
+ - Seconds to wait after the reboot command was successful before attempting to validate the system rebooted successfully.
+ - This is useful if you want to wait for something to settle despite your connection already working.
+ type: int
+ default: 0
+ reboot_timeout:
+ description:
+ - Maximum seconds to wait for machine to reboot and respond to a test command.
+ - This timeout is evaluated separately for both reboot verification and test command success so the
+ maximum execution time for the module is twice this amount.
+ type: int
+ default: 600
+ connect_timeout:
+ description:
+ - Maximum seconds to wait for a successful connection to the managed hosts before trying again.
+ - If unspecified, the default setting for the underlying connection plugin is used.
+ type: int
+ test_command:
+ description:
+ - Command to run on the rebooted host that must succeed before the machine is considered
+ ready for further tasks.
+ type: str
+ default: whoami
+ msg:
+ description:
+ - Message to display to users before reboot.
+ type: str
+ default: Reboot initiated by Ansible
+
+ search_paths:
+ description:
+ - Paths to search on the remote machine for the C(shutdown) command.
+ - I(Only) these paths will be searched for the C(shutdown) command. C(PATH) is ignored on the remote node when searching for the C(shutdown) command.
+ type: list
+ elements: str
+ default: ['/sbin', '/bin', '/usr/sbin', '/usr/bin', '/usr/local/sbin']
+ version_added: '2.8'
+
+ boot_time_command:
+ description:
+ - Command to run that returns a unique string indicating the last time the system was booted.
+ - Setting this to a command that has different output each time it is run will cause the task to fail.
+ type: str
+ default: 'cat /proc/sys/kernel/random/boot_id'
+ version_added: '2.10'
+
+ reboot_command:
+ description:
+ - Command to run that reboots the system, including any parameters passed to the command.
+ - Can be an absolute path to the command or just the command name. If an absolute path to the
+ command is not given, C(search_paths) on the target system will be searched to find the absolute path.
+ - This will cause C(pre_reboot_delay), C(post_reboot_delay), and C(msg) to be ignored.
+ type: str
+ default: '[determined based on target OS]'
+ version_added: '2.11'
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+- module: ansible.windows.win_reboot
+author:
+ - Matt Davis (@nitzmahone)
+ - Sam Doran (@samdoran)
+'''
+
+EXAMPLES = r'''
+- name: Unconditionally reboot the machine with all defaults
+ ansible.builtin.reboot:
+
+- name: Reboot a slow machine that might have lots of updates to apply
+ ansible.builtin.reboot:
+ reboot_timeout: 3600
+
+- name: Reboot a machine with shutdown command in unusual place
+ ansible.builtin.reboot:
+ search_paths:
+ - '/lib/molly-guard'
+
+- name: Reboot machine using a custom reboot command
+ ansible.builtin.reboot:
+ reboot_command: launchctl reboot userspace
+ boot_time_command: uptime | cut -d ' ' -f 5
+
+'''
+
+RETURN = r'''
+rebooted:
+ description: true if the machine was rebooted
+ returned: always
+ type: bool
+ sample: true
+elapsed:
+ description: The number of seconds that elapsed waiting for the system to be rebooted.
+ returned: always
+ type: int
+ sample: 23
+'''
diff --git a/lib/ansible/modules/replace.py b/lib/ansible/modules/replace.py
new file mode 100644
index 0000000..4b8f74f
--- /dev/null
+++ b/lib/ansible/modules/replace.py
@@ -0,0 +1,316 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Evan Kaufman <evan@digitalflophouse.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: replace
+author: Evan Kaufman (@EvanK)
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.files
+ - files
+ - validate
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: full
+ vault:
+ support: none
+short_description: Replace all instances of a particular string in a
+ file using a back-referenced regular expression
+description:
+ - This module will replace all instances of a pattern within a file.
+ - It is up to the user to maintain idempotence by ensuring that the
+ same pattern would never match any replacements made.
+version_added: "1.6"
+options:
+ path:
+ description:
+ - The file to modify.
+ - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
+ type: path
+ required: true
+ aliases: [ dest, destfile, name ]
+ regexp:
+ description:
+ - The regular expression to look for in the contents of the file.
+ - Uses Python regular expressions; see
+ U(https://docs.python.org/3/library/re.html).
+ - Uses MULTILINE mode, which means C(^) and C($) match the beginning
+ and end of the file, as well as the beginning and end respectively
+ of I(each line) of the file.
+ - Does not use DOTALL, which means the C(.) special character matches
+ any character I(except newlines). A common mistake is to assume that
+ a negated character set like C([^#]) will also not match newlines.
+ - In order to exclude newlines, they must be added to the set like C([^#\n]).
+ - Note that, as of Ansible 2.0, short form tasks should have any escape
+ sequences backslash-escaped in order to prevent them being parsed
+ as string literal escapes. See the examples.
+ type: str
+ required: true
+ replace:
+ description:
+ - The string to replace regexp matches.
+ - May contain backreferences that will get expanded with the regexp capture groups if the regexp matches.
+ - If not set, matches are removed entirely.
+ - Backreferences can be used ambiguously like C(\1), or explicitly like C(\g<1>).
+ type: str
+ after:
+ description:
+ - If specified, only content after this match will be replaced/removed.
+ - Can be used in combination with C(before).
+ - Uses Python regular expressions; see
+ U(https://docs.python.org/3/library/re.html).
+ - Uses DOTALL, which means the C(.) special character I(can match newlines).
+ type: str
+ version_added: "2.4"
+ before:
+ description:
+ - If specified, only content before this match will be replaced/removed.
+ - Can be used in combination with C(after).
+ - Uses Python regular expressions; see
+ U(https://docs.python.org/3/library/re.html).
+ - Uses DOTALL, which means the C(.) special character I(can match newlines).
+ type: str
+ version_added: "2.4"
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ others:
+ description:
+ - All arguments accepted by the M(ansible.builtin.file) module also work here.
+ type: str
+ encoding:
+ description:
+ - The character encoding for reading and writing the file.
+ type: str
+ default: utf-8
+ version_added: "2.4"
+notes:
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+ - As of Ansible 2.7.10, the combined use of I(before) and I(after) works properly. If you were relying on the
+ previous incorrect behavior, you may need to adjust your tasks.
+ See U(https://github.com/ansible/ansible/issues/31354) for details.
+ - Option I(follow) has been removed in Ansible 2.5, because this module modifies the contents of the file so I(follow=no) doesn't make sense.
+'''
+
+EXAMPLES = r'''
+- name: Replace old hostname with new hostname (requires Ansible >= 2.4)
+ ansible.builtin.replace:
+ path: /etc/hosts
+ regexp: '(\s+)old\.host\.name(\s+.*)?$'
+ replace: '\1new.host.name\2'
+
+- name: Replace after the expression until the end of the file (requires Ansible >= 2.4)
+ ansible.builtin.replace:
+ path: /etc/apache2/sites-available/default.conf
+ after: 'NameVirtualHost [*]'
+ regexp: '^(.+)$'
+ replace: '# \1'
+
+- name: Replace before the expression until the beginning of the file (requires Ansible >= 2.4)
+ ansible.builtin.replace:
+ path: /etc/apache2/sites-available/default.conf
+ before: '# live site config'
+ regexp: '^(.+)$'
+ replace: '# \1'
+
+# Prior to Ansible 2.7.10, using before and after in combination did the opposite of what was intended.
+# see https://github.com/ansible/ansible/issues/31354 for details.
+- name: Replace between the expressions (requires Ansible >= 2.4)
+ ansible.builtin.replace:
+ path: /etc/hosts
+ after: '<VirtualHost [*]>'
+ before: '</VirtualHost>'
+ regexp: '^(.+)$'
+ replace: '# \1'
+
+- name: Supports common file attributes
+ ansible.builtin.replace:
+ path: /home/jdoe/.ssh/known_hosts
+ regexp: '^old\.host\.name[^\n]*\n'
+ owner: jdoe
+ group: jdoe
+ mode: '0644'
+
+- name: Supports a validate command
+ ansible.builtin.replace:
+ path: /etc/apache/ports
+ regexp: '^(NameVirtualHost|Listen)\s+80\s*$'
+ replace: '\1 127.0.0.1:8080'
+ validate: '/usr/sbin/apache2ctl -f %s -t'
+
+- name: Short form task (in ansible 2+) necessitates backslash-escaped sequences
+ ansible.builtin.replace: path=/etc/hosts regexp='\\b(localhost)(\\d*)\\b' replace='\\1\\2.localdomain\\2 \\1\\2'
+
+- name: Long form task does not
+ ansible.builtin.replace:
+ path: /etc/hosts
+ regexp: '\b(localhost)(\d*)\b'
+ replace: '\1\2.localdomain\2 \1\2'
+
+- name: Explicitly specifying positional matched groups in replacement
+ ansible.builtin.replace:
+ path: /etc/ssh/sshd_config
+ regexp: '^(ListenAddress[ ]+)[^\n]+$'
+ replace: '\g<1>0.0.0.0'
+
+- name: Explicitly specifying named matched groups
+ ansible.builtin.replace:
+ path: /etc/ssh/sshd_config
+ regexp: '^(?P<dctv>ListenAddress[ ]+)(?P<host>[^\n]+)$'
+ replace: '#\g<dctv>\g<host>\n\g<dctv>0.0.0.0'
+'''
+
+RETURN = r'''#'''
+
+import os
+import re
+import tempfile
+from traceback import format_exc
+
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.basic import AnsibleModule
+
+
+def write_changes(module, contents, path):
+
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(contents)
+ f.close()
+
+ validate = module.params.get('validate', None)
+ valid = not validate
+ if validate:
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(validate % tmpfile)
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc, err))
+ if valid:
+ module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
+
+
+def check_file_attrs(module, changed, message):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_file_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
+ regexp=dict(type='str', required=True),
+ replace=dict(type='str', default=''),
+ after=dict(type='str'),
+ before=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ encoding=dict(type='str', default='utf-8'),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ path = params['path']
+ encoding = params['encoding']
+ res_args = dict(rc=0)
+
+ params['after'] = to_text(params['after'], errors='surrogate_or_strict', nonstring='passthru')
+ params['before'] = to_text(params['before'], errors='surrogate_or_strict', nonstring='passthru')
+ params['regexp'] = to_text(params['regexp'], errors='surrogate_or_strict', nonstring='passthru')
+ params['replace'] = to_text(params['replace'], errors='surrogate_or_strict', nonstring='passthru')
+
+ if os.path.isdir(path):
+ module.fail_json(rc=256, msg='Path %s is a directory!' % path)
+
+ if not os.path.exists(path):
+ module.fail_json(rc=257, msg='Path %s does not exist!' % path)
+ else:
+ try:
+ with open(path, 'rb') as f:
+ contents = to_text(f.read(), errors='surrogate_or_strict', encoding=encoding)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to read the contents of %s: %s' % (path, to_text(e)),
+ exception=format_exc())
+
+ pattern = u''
+ if params['after'] and params['before']:
+ pattern = u'%s(?P<subsection>.*?)%s' % (params['after'], params['before'])
+ elif params['after']:
+ pattern = u'%s(?P<subsection>.*)' % params['after']
+ elif params['before']:
+ pattern = u'(?P<subsection>.*)%s' % params['before']
+
+ if pattern:
+ section_re = re.compile(pattern, re.DOTALL)
+ match = re.search(section_re, contents)
+ if match:
+ section = match.group('subsection')
+ indices = [match.start('subsection'), match.end('subsection')]
+ else:
+ res_args['msg'] = 'Pattern for before/after params did not match the given file: %s' % pattern
+ res_args['changed'] = False
+ module.exit_json(**res_args)
+ else:
+ section = contents
+
+ mre = re.compile(params['regexp'], re.MULTILINE)
+ result = re.subn(mre, params['replace'], section, 0)
+
+ if result[1] > 0 and section != result[0]:
+ if pattern:
+ result = (contents[:indices[0]] + result[0] + contents[indices[1]:], result[1])
+ msg = '%s replacements made' % result[1]
+ changed = True
+ if module._diff:
+ res_args['diff'] = {
+ 'before_header': path,
+ 'before': contents,
+ 'after_header': path,
+ 'after': result[0],
+ }
+ else:
+ msg = ''
+ changed = False
+
+ if changed and not module.check_mode:
+ if params['backup'] and os.path.exists(path):
+ res_args['backup_file'] = module.backup_local(path)
+ # We should always follow symlinks so that we change the real file
+ path = os.path.realpath(path)
+ write_changes(module, to_bytes(result[0], encoding=encoding), path)
+
+ res_args['msg'], res_args['changed'] = check_file_attrs(module, changed, msg)
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/rpm_key.py b/lib/ansible/modules/rpm_key.py
new file mode 100644
index 0000000..f420eec
--- /dev/null
+++ b/lib/ansible/modules/rpm_key.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+
+# Ansible module to import third party repo keys to your rpm db
+# Copyright: (c) 2013, Héctor Acosta <hector.acosta@gazzang.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rpm_key
+author:
+ - Hector Acosta (@hacosta) <hector.acosta@gazzang.com>
+short_description: Adds or removes a gpg key from the rpm db
+description:
+ - Adds or removes (rpm --import) a gpg key to your rpm database.
+version_added: "1.3"
+options:
+ key:
+ description:
+ - Key that will be modified. Can be a url, a file on the managed node, or a keyid if the key
+ already exists in the database.
+ type: str
+ required: true
+ state:
+ description:
+ - Whether the key will be imported or removed from the rpm db.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ validate_certs:
+ description:
+ - If C(false) and the C(key) is a url starting with https, SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ fingerprint:
+ description:
+ - The long-form fingerprint of the key being imported.
+ - This will be used to verify the specified key.
+ type: str
+ version_added: 2.9
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: rhel
+'''
+
+EXAMPLES = '''
+- name: Import a key from a url
+ ansible.builtin.rpm_key:
+ state: present
+ key: http://apt.sw.be/RPM-GPG-KEY.dag.txt
+
+- name: Import a key from a file
+ ansible.builtin.rpm_key:
+ state: present
+ key: /path/to/key.gpg
+
+- name: Ensure a key is not present in the db
+ ansible.builtin.rpm_key:
+ state: absent
+ key: DEADB33F
+
+- name: Verify the key, using a fingerprint, before import
+ ansible.builtin.rpm_key:
+ key: /path/to/RPM-GPG-KEY.dag.txt
+ fingerprint: EBC6 E12C 62B1 C734 026B 2122 A20E 5214 6B8D 79E6
+'''
+
+RETURN = r'''#'''
+
+import re
+import os.path
+import tempfile
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+
+def is_pubkey(string):
+ """Verifies if string is a pubkey"""
+ pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
+ return bool(re.match(pgp_regex, to_native(string, errors='surrogate_or_strict'), re.DOTALL))
+
+
+class RpmKey(object):
+
+ def __init__(self, module):
+ # If the key is a url, we need to check if it's present to be idempotent;
+ # to do that, we need the keyid, which we can get from the armor.
+ keyfile = None
+ should_cleanup_keyfile = False
+ self.module = module
+ self.rpm = self.module.get_bin_path('rpm', True)
+ state = module.params['state']
+ key = module.params['key']
+ fingerprint = module.params['fingerprint']
+ if fingerprint:
+ fingerprint = fingerprint.replace(' ', '').upper()
+
+ self.gpg = self.module.get_bin_path('gpg')
+ if not self.gpg:
+ self.gpg = self.module.get_bin_path('gpg2', required=True)
+
+ if '://' in key:
+ keyfile = self.fetch_key(key)
+ keyid = self.getkeyid(keyfile)
+ should_cleanup_keyfile = True
+ elif self.is_keyid(key):
+ keyid = key
+ elif os.path.isfile(key):
+ keyfile = key
+ keyid = self.getkeyid(keyfile)
+ else:
+ self.module.fail_json(msg="Not a valid key %s" % key)
+ keyid = self.normalize_keyid(keyid)
+
+ if state == 'present':
+ if self.is_key_imported(keyid):
+ module.exit_json(changed=False)
+ else:
+ if not keyfile:
+ self.module.fail_json(msg="When importing a key, a valid file must be given")
+ if fingerprint:
+ has_fingerprint = self.getfingerprint(keyfile)
+ if fingerprint != has_fingerprint:
+ self.module.fail_json(
+ msg="The specified fingerprint, '%s', does not match the key fingerprint '%s'" % (fingerprint, has_fingerprint)
+ )
+ self.import_key(keyfile)
+ if should_cleanup_keyfile:
+ self.module.cleanup(keyfile)
+ module.exit_json(changed=True)
+ else:
+ if self.is_key_imported(keyid):
+ self.drop_key(keyid)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ def fetch_key(self, url):
+ """Downloads a key from url, returns a valid path to a gpg key"""
+ rsp, info = fetch_url(self.module, url)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to fetch key at %s , error was: %s" % (url, info['msg']))
+
+ key = rsp.read()
+ if not is_pubkey(key):
+ self.module.fail_json(msg="Not a public key: %s" % url)
+ tmpfd, tmpname = tempfile.mkstemp()
+ self.module.add_cleanup_file(tmpname)
+ tmpfile = os.fdopen(tmpfd, "w+b")
+ tmpfile.write(key)
+ tmpfile.close()
+ return tmpname
+
+ def normalize_keyid(self, keyid):
+ """Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is uppercase"""
+ ret = keyid.strip().upper()
+ if ret.startswith('0x'):
+ return ret[2:]
+ elif ret.startswith('0X'):
+ return ret[2:]
+ else:
+ return ret
+
+ def getkeyid(self, keyfile):
+ stdout, stderr = self.execute_command([self.gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', keyfile])
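+ # Illustrative --with-colons 'pub' record (keyid is the fifth field,
+ # index 4); values here are hypothetical:
+ # pub:-:4096:1:A20E52146B8D79E6:1481103172::-:...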
+ for line in stdout.splitlines():
+ line = line.strip()
+ if line.startswith('pub:'):
+ return line.split(':')[4]
+
+ self.module.fail_json(msg="Unexpected gpg output")
+
+ def getfingerprint(self, keyfile):
+ stdout, stderr = self.execute_command([
+ self.gpg, '--no-tty', '--batch', '--with-colons',
+ '--fixed-list-mode', '--with-fingerprint', keyfile
+ ])
+ for line in stdout.splitlines():
+ line = line.strip()
+ if line.startswith('fpr:'):
+ # As mentioned here,
+ #
+ # https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob_plain;f=doc/DETAILS
+ #
+ # The description of the `fpr` field says
+ #
+ # "fpr :: Fingerprint (fingerprint is in field 10)"
+ #
+ return line.split(':')[9]
+
+ self.module.fail_json(msg="Unexpected gpg output")
+
+ def is_keyid(self, keystr):
+ """Verifies if a key, as provided by the user is a keyid"""
+ return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
+
+ def execute_command(self, cmd):
+ rc, stdout, stderr = self.module.run_command(cmd, use_unsafe_shell=True)
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+ return stdout, stderr
+
+ def is_key_imported(self, keyid):
+ cmd = self.rpm + ' -q gpg-pubkey'
+ rc, stdout, stderr = self.module.run_command(cmd)
+ if rc != 0: # No key is installed on system
+ return False
+ cmd += ' --qf "%{description}" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'
+ stdout, stderr = self.execute_command(cmd)
+ for line in stdout.splitlines():
+ if keyid in line.split(':')[4]:
+ return True
+ return False
+
+ def import_key(self, keyfile):
+ if not self.module.check_mode:
+ self.execute_command([self.rpm, '--import', keyfile])
+
+ def drop_key(self, keyid):
+ if not self.module.check_mode:
+ self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % keyid[-8:].lower()])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ key=dict(type='str', required=True, no_log=False),
+ fingerprint=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ RpmKey(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/script.py b/lib/ansible/modules/script.py
new file mode 100644
index 0000000..2cefc0a
--- /dev/null
+++ b/lib/ansible/modules/script.py
@@ -0,0 +1,108 @@
+# Copyright: (c) 2012, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: script
+version_added: "0.9"
+short_description: Runs a local script on a remote node after transferring it
+description:
+ - The C(script) module takes the script name followed by a list of space-delimited arguments.
+ - Either a free form command or C(cmd) parameter is required, see the examples.
+ - The local script at path will be transferred to the remote node and then executed.
+ - The given script will be processed through the shell environment on the remote node.
+ - This module does not require python on the remote system, much like the M(ansible.builtin.raw) module.
+ - This module is also supported for Windows targets.
+options:
+ free_form:
+ description:
+ - Path to the local script file followed by optional arguments.
+ cmd:
+ type: str
+ description:
+ - Path to the local script to run followed by optional arguments.
+ creates:
+ description:
+ - A filename on the remote node; when it already exists, this step will B(not) be run.
+ version_added: "1.5"
+ removes:
+ description:
+ - A filename on the remote node; when it does not exist, this step will B(not) be run.
+ version_added: "1.5"
+ chdir:
+ description:
+ - Change into this directory on the remote node before running the script.
+ version_added: "2.4"
+ executable:
+ description:
+ - Name or path of an executable to invoke the script with.
+ version_added: "2.6"
+notes:
+ - It is usually preferable to write Ansible modules rather than pushing scripts. Convert your script to an Ansible module for bonus points!
+ - The C(ssh) connection plugin will force pseudo-tty allocation via C(-tt) when scripts are executed. Pseudo-ttys do not have a stderr channel and all
+ stderr is sent to stdout. If you depend on separated stdout and stderr result keys, please switch to a copy+command set of tasks instead of using script.
+ - If the path to the local script contains spaces, it needs to be quoted.
+ - This module is also supported for Windows targets.
+seealso:
+ - module: ansible.builtin.shell
+ - module: ansible.windows.win_shell
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.files
+ - action_common_attributes.raw
+ - decrypt
+attributes:
+ check_mode:
+ support: partial
+ details: while the script itself is arbitrary and cannot be subject to the check mode semantics it adds C(creates)/C(removes) options as a workaround
+ diff_mode:
+ support: none
+ platform:
+ details: This action is one of the few that requires no Python on the remote as it passes the command directly into the connection string
+ platforms: all
+ raw:
+ support: full
+ safe_file_operations:
+ support: none
+ vault:
+ support: full
+'''
+
+EXAMPLES = r'''
+- name: Run a script with arguments (free form)
+ ansible.builtin.script: /some/local/script.sh --some-argument 1234
+
+- name: Run a script with arguments (using 'cmd' parameter)
+ ansible.builtin.script:
+ cmd: /some/local/script.sh --some-argument 1234
+
+- name: Run a script only if file.txt does not exist on the remote node
+ ansible.builtin.script: /some/local/create_file.sh --some-argument 1234
+ args:
+ creates: /the/created/file.txt
+
+- name: Run a script only if file.txt exists on the remote node
+ ansible.builtin.script: /some/local/remove_file.sh --some-argument 1234
+ args:
+ removes: /the/removed/file.txt
+
+- name: Run a script using an executable in a non-system path
+ ansible.builtin.script: /some/local/script
+ args:
+ executable: /some/remote/executable
+
+- name: Run a script using an executable in a system path
+ ansible.builtin.script: /some/local/script.py
+ args:
+ executable: python3
+
+- name: Run a PowerShell script on a Windows host
+ script: subdirectories/under/path/with/your/playbook/script.ps1
+'''
diff --git a/lib/ansible/modules/service.py b/lib/ansible/modules/service.py
new file mode 100644
index 0000000..a84829c
--- /dev/null
+++ b/lib/ansible/modules/service.py
@@ -0,0 +1,1699 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: service
+version_added: "0.1"
+short_description: Manage services
+description:
+ - Controls services on remote hosts. Supported init systems include BSD init,
+ OpenRC, SysV, Solaris SMF, systemd, upstart.
+ - This module acts as a proxy to the underlying service manager module. While all arguments will be passed to the
+ underlying module, not all modules support the same arguments. This documentation only covers the minimum intersection
+ of module arguments that all service manager modules support.
+ - This module is a proxy for multiple more specific service manager modules
+ (such as M(ansible.builtin.systemd) and M(ansible.builtin.sysvinit)).
+ This allows management of a heterogeneous environment of machines without creating a specific task for
+ each service manager. The module to be executed is determined by the I(use) option, which defaults to the
+ service manager discovered by M(ansible.builtin.setup). If C(setup) was not yet run, this module may run it.
+ - For Windows targets, use the M(ansible.windows.win_service) module instead.
+options:
+ name:
+ description:
+ - Name of the service.
+ type: str
+ required: true
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ - C(restarted) will always bounce the service.
+ - C(reloaded) will always reload.
+ - B(At least one of state and enabled is required.)
+ - Note that reloaded will start the service if it is not already started,
+ even if your chosen init system wouldn't normally.
+ type: str
+ choices: [ reloaded, restarted, started, stopped ]
+ sleep:
+ description:
+ - If the service is being C(restarted) then sleep this many seconds
+ between the stop and start command.
+ - This helps to work around badly-behaving init scripts that exit immediately
+ after signaling a process to stop.
+ - Not all service managers support sleep; for example, when using systemd this setting will be ignored.
+ type: int
+ version_added: "1.3"
+ pattern:
+ description:
+ - If the service does not respond to the status command, name a
+ substring to look for as would be found in the output of the I(ps)
+ command as a stand-in for a status result.
+ - If the string is found, the service will be assumed to be started.
+ - While using remote hosts with systemd this setting will be ignored.
+ type: str
+ version_added: "0.7"
+ enabled:
+ description:
+ - Whether the service should start on boot.
+ - B(At least one of state and enabled is required.)
+ type: bool
+ runlevel:
+ description:
+ - For OpenRC init scripts (e.g. Gentoo) only.
+ - The runlevel that this service belongs to.
+ - While using remote hosts with systemd this setting will be ignored.
+ type: str
+ default: default
+ arguments:
+ description:
+ - Additional arguments provided on the command line.
+ - While using remote hosts with systemd this setting will be ignored.
+ type: str
+ aliases: [ args ]
+ use:
+ description:
+ - The service module actually uses system specific modules, normally through auto detection; this setting can force a specific module.
+ - Normally it uses the value of the 'ansible_service_mgr' fact and falls back to the old 'service' module when no matching one is found.
+ type: str
+ default: auto
+ version_added: 2.2
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: full
+ bypass_host_loop:
+ support: none
+ check_mode:
+ details: support depends on the underlying plugin invoked
+ support: N/A
+ diff_mode:
+ details: support depends on the underlying plugin invoked
+ support: N/A
+ platform:
+ details: The support depends on the availability for the specific plugin for each platform and if fact gathering is able to detect it
+ platforms: all
+notes:
+ - For AIX, group subsystem names can be used.
+seealso:
+ - module: ansible.windows.win_service
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Start service httpd, if not started
+ ansible.builtin.service:
+ name: httpd
+ state: started
+
+- name: Stop service httpd, if started
+ ansible.builtin.service:
+ name: httpd
+ state: stopped
+
+- name: Restart service httpd, in all cases
+ ansible.builtin.service:
+ name: httpd
+ state: restarted
+
+- name: Reload service httpd, in all cases
+ ansible.builtin.service:
+ name: httpd
+ state: reloaded
+
+- name: Enable service httpd, and not touch the state
+ ansible.builtin.service:
+ name: httpd
+ enabled: yes
+
+- name: Start service foo, based on running process /usr/bin/foo
+ ansible.builtin.service:
+ name: foo
+ pattern: /usr/bin/foo
+ state: started
+
+- name: Restart network service for interface eth0
+ ansible.builtin.service:
+ name: network
+ state: restarted
+ args: eth0
+'''
+
+RETURN = r'''#'''
+
+import glob
+import json
+import os
+import platform
+import re
+import select
+import shlex
+import subprocess
+import tempfile
+import time
+
+# The distutils module is not shipped with SUNWPython on Solaris.
+# It's in the SUNWPython-devel package which also contains development files
+# that don't belong on production boxes. Since our Solaris code doesn't
+# depend on LooseVersion, do not import it on Solaris.
+if platform.system() != 'SunOS':
+ from ansible.module_utils.compat.version import LooseVersion
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.sys_info import get_platform_subclass
+from ansible.module_utils.service import fail_if_missing
+from ansible.module_utils.six import PY2, b
+
+
+class Service(object):
+ """
+ This is the generic Service manipulation class that is subclassed
+ based on platform.
+
+ A subclass should override the following action methods:
+ - get_service_tools
+ - service_enable
+ - get_service_status
+ - service_control
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None
+
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(Service)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.state = module.params['state']
+ self.sleep = module.params['sleep']
+ self.pattern = module.params['pattern']
+ self.enable = module.params['enabled']
+ self.runlevel = module.params['runlevel']
+ self.changed = False
+ self.running = None
+ self.crashed = None
+ self.action = None
+ self.svc_cmd = None
+ self.svc_initscript = None
+ self.svc_initctl = None
+ self.enable_cmd = None
+ self.arguments = module.params.get('arguments', '')
+ self.rcconf_file = None
+ self.rcconf_key = None
+ self.rcconf_value = None
+ self.svc_change = False
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get_service_tools(self):
+ self.module.fail_json(msg="get_service_tools not implemented on target platform")
+
+ def service_enable(self):
+ self.module.fail_json(msg="service_enable not implemented on target platform")
+
+ def get_service_status(self):
+ self.module.fail_json(msg="get_service_status not implemented on target platform")
+
+ def service_control(self):
+ self.module.fail_json(msg="service_control not implemented on target platform")
+
+ # ===========================================
+ # Generic methods that should be used on all platforms.
+
+ def execute_command(self, cmd, daemonize=False):
+
+ locale = get_best_parsable_locale(self.module)
+ lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+
+ # Most things don't need to be daemonized
+ if not daemonize:
+ # chkconfig localizes messages and we're screen scraping so make
+ # sure we use the C locale
+ return self.module.run_command(cmd, environ_update=lang_env)
+
+ # This is complex because daemonization is hard for people.
+ # What we do is daemonize a part of this module, the daemon runs the
+ # command, picks up the return code and output, and returns it to the
+ # main process.
+ pipe = os.pipe()
+ pid = os.fork()
+ if pid == 0:
+ os.close(pipe[0])
+ # Set stdin/stdout/stderr to /dev/null
+ fd = os.open(os.devnull, os.O_RDWR)
+ if fd != 0:
+ os.dup2(fd, 0)
+ if fd != 1:
+ os.dup2(fd, 1)
+ if fd != 2:
+ os.dup2(fd, 2)
+ if fd not in (0, 1, 2):
+ os.close(fd)
+
+ # Make us a daemon. Yes, that's all it takes.
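+ # (classic double fork: fork + setsid detaches from the controlling
+ # terminal, and the second fork prevents reacquiring one, so the
+ # command keeps running after the module process exits)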
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+ os.setsid()
+ os.chdir("/")
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+
+ # Start the command
+ if PY2:
+ # Python 2.6's shlex.split can't handle text strings correctly
+ cmd = to_bytes(cmd, errors='surrogate_or_strict')
+ cmd = shlex.split(cmd)
+ else:
+ # Python 3's shlex.split handles text strings correctly.
+ cmd = to_text(cmd, errors='surrogate_or_strict')
+ cmd = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(cmd)]
+ # In either of the above cases, pass a list of byte strings to Popen
+
+ # chkconfig localizes messages and we're screen scraping so make
+ # sure we use the C locale
+ p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=lang_env, preexec_fn=lambda: os.close(pipe[1]))
+ stdout = b("")
+ stderr = b("")
+ fds = [p.stdout, p.stderr]
+ # Wait for all output, or until the main process is dead and its output is done.
+ while fds:
+ rfd, wfd, efd = select.select(fds, [], fds, 1)
+ if not (rfd + wfd + efd) and p.poll() is not None:
+ break
+ if p.stdout in rfd:
+ dat = os.read(p.stdout.fileno(), 4096)
+ if not dat:
+ fds.remove(p.stdout)
+ stdout += dat
+ if p.stderr in rfd:
+ dat = os.read(p.stderr.fileno(), 4096)
+ if not dat:
+ fds.remove(p.stderr)
+ stderr += dat
+ p.wait()
+ # Return a JSON blob to parent
+ blob = json.dumps([p.returncode, to_text(stdout), to_text(stderr)])
+ os.write(pipe[1], to_bytes(blob, errors='surrogate_or_strict'))
+ os.close(pipe[1])
+ os._exit(0)
+ elif pid == -1:
+ self.module.fail_json(msg="unable to fork")
+ else:
+ os.close(pipe[1])
+ os.waitpid(pid, 0)
+ # Wait for data from daemon process and process it.
+ data = b("")
+ while True:
+ rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
+ if pipe[0] in rfd:
+ dat = os.read(pipe[0], 4096)
+ if not dat:
+ break
+ data += dat
+ return json.loads(to_text(data, errors='surrogate_or_strict'))
+
+ def check_ps(self):
+ # Set ps flags
+ if platform.system() == 'SunOS':
+ psflags = '-ef'
+ else:
+ psflags = 'auxww'
+
+ # Find ps binary
+ psbin = self.module.get_bin_path('ps', True)
+
+ (rc, psout, pserr) = self.execute_command('%s %s' % (psbin, psflags))
+ # If rc is 0, set running as appropriate
+ if rc == 0:
+ self.running = False
+ lines = psout.split("\n")
+ for line in lines:
+ if self.pattern in line and "pattern=" not in line:
+ # so as to not confuse ./hacking/test-module.py
+ self.running = True
+ break
+
+ def check_service_changed(self):
+ if self.state and self.running is None:
+ self.module.fail_json(msg="failed determining service state, possible typo of service name?")
+ # Find out if state has changed
+ if not self.running and self.state in ["reloaded", "started"]:
+ self.svc_change = True
+ elif self.running and self.state in ["reloaded", "stopped"]:
+ self.svc_change = True
+ elif self.state == "restarted":
+ self.svc_change = True
+ if self.module.check_mode and self.svc_change:
+ self.module.exit_json(changed=True, msg='service state changed')
+
+ def modify_service_state(self):
+
+ # Only do something if state will change
+ if self.svc_change:
+ # Control service
+ if self.state in ['started']:
+ self.action = "start"
+ elif not self.running and self.state == 'reloaded':
+ self.action = "start"
+ elif self.state == 'stopped':
+ self.action = "stop"
+ elif self.state == 'reloaded':
+ self.action = "reload"
+ elif self.state == 'restarted':
+ self.action = "restart"
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='changing service state')
+
+ return self.service_control()
+
+ else:
+ # If nothing needs to change just say all is well
+ rc = 0
+ err = ''
+ out = ''
+ return rc, out, err
+
+ def service_enable_rcconf(self):
+ if self.rcconf_file is None or self.rcconf_key is None or self.rcconf_value is None:
+ self.module.fail_json(msg="service_enable_rcconf() requires rcconf_file, rcconf_key and rcconf_value")
+
+ self.changed = None
+ entry = '%s="%s"\n' % (self.rcconf_key, self.rcconf_value)
+ with open(self.rcconf_file, "r") as RCFILE:
+ new_rc_conf = []
+
+ # Build a list containing the possibly modified file.
+ for rcline in RCFILE:
+ # Parse line removing whitespaces, quotes, etc.
+ rcarray = shlex.split(rcline, comments=True)
+ if len(rcarray) >= 1 and '=' in rcarray[0]:
+ (key, value) = rcarray[0].split("=", 1)
+ if key == self.rcconf_key:
+ if value.upper() == self.rcconf_value:
+ # Since the proper entry already exists we can stop iterating.
+ self.changed = False
+ break
+ else:
+ # We found the key but the value is wrong, replace with new entry.
+ rcline = entry
+ self.changed = True
+
+ # Add line to the list.
+ new_rc_conf.append(rcline.strip() + '\n')
+
+ # If we did not see any trace of our entry we need to add it.
+ if self.changed is None:
+ new_rc_conf.append(entry)
+ self.changed = True
+
+ if self.changed is True:
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg="changing service enablement")
+
+ # Create a temporary file next to the current rc.conf (so we stay on the same filesystem).
+ # This way the replacement operation is atomic.
+ rcconf_dir = os.path.dirname(self.rcconf_file)
+ rcconf_base = os.path.basename(self.rcconf_file)
+ (TMP_RCCONF, tmp_rcconf_file) = tempfile.mkstemp(dir=rcconf_dir, prefix="%s-" % rcconf_base)
+
+ # Write out the contents of the list into our temporary file.
+ for rcline in new_rc_conf:
+ os.write(TMP_RCCONF, rcline.encode())
+
+ # Close temporary file.
+ os.close(TMP_RCCONF)
+
+ # Replace previous rc.conf.
+ self.module.atomic_move(tmp_rcconf_file, self.rcconf_file)
+
+
+class LinuxService(Service):
+ """
+ This is the Linux Service manipulation class - it is currently supporting
+ a mixture of binaries and init scripts for controlling services started at
+ boot, as well as for controlling the current state.
+ """
+
+ platform = 'Linux'
+ distribution = None
+
+ def get_service_tools(self):
+
+ paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
+ binaries = ['service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart', 'insserv']
+ initpaths = ['/etc/init.d']
+ location = dict()
+
+ for binary in binaries:
+ location[binary] = self.module.get_bin_path(binary, opt_dirs=paths)
+
+ for initdir in initpaths:
+ initscript = "%s/%s" % (initdir, self.name)
+ if os.path.isfile(initscript):
+ self.svc_initscript = initscript
+
+ def check_systemd():
+
+ # tools must be installed
+ if location.get('systemctl', False):
+
+ # this should show if systemd is the boot init system
+ # these mirror systemd's own sd_booted() test http://www.freedesktop.org/software/systemd/man/sd_booted.html
+ for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
+ if os.path.exists(canary):
+ return True
+
+ # If all else fails, check if init is the systemd command, using comm as cmdline could be symlink
+ try:
+ f = open('/proc/1/comm', 'r')
+ except IOError:
+ # If comm doesn't exist, old kernel, no systemd
+ return False
+
+ for line in f:
+ if 'systemd' in line:
+ return True
+
+ return False
+
+ # Locate a tool to enable/disable a service
+ if check_systemd():
+ # service is managed by systemd
+ self.__systemd_unit = self.name
+ self.svc_cmd = location['systemctl']
+ self.enable_cmd = location['systemctl']
+
+ elif location.get('initctl', False) and os.path.exists("/etc/init/%s.conf" % self.name):
+ # service is managed by upstart
+ self.enable_cmd = location['initctl']
+ # set the upstart version based on the output of 'initctl version'
+ self.upstart_version = LooseVersion('0.0.0')
+ try:
+ version_re = re.compile(r'\(upstart (.*)\)')
+ rc, stdout, stderr = self.module.run_command('%s version' % location['initctl'])
+ if rc == 0:
+ res = version_re.search(stdout)
+ if res:
+ self.upstart_version = LooseVersion(res.groups()[0])
+ except Exception:
+ pass # we'll use the default of 0.0.0
+
+ self.svc_cmd = location['initctl']
+
+ elif location.get('rc-service', False):
+ # service is managed by OpenRC
+ self.svc_cmd = location['rc-service']
+ self.enable_cmd = location['rc-update']
+ return # already have service start/stop tool too!
+
+ elif self.svc_initscript:
+ # service is managed with SysV init scripts
+ if location.get('update-rc.d', False):
+ # and uses update-rc.d
+ self.enable_cmd = location['update-rc.d']
+ elif location.get('insserv', None):
+ # and uses insserv
+ self.enable_cmd = location['insserv']
+ elif location.get('chkconfig', False):
+ # and uses chkconfig
+ self.enable_cmd = location['chkconfig']
+
+ if self.enable_cmd is None:
+ fail_if_missing(self.module, False, self.name, msg='host')
+
+ # If no service control tool selected yet, try to see if 'service' is available
+ if self.svc_cmd is None and location.get('service', False):
+ self.svc_cmd = location['service']
+
+ # couldn't find anything yet
+ if self.svc_cmd is None and not self.svc_initscript:
+ self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting')
+
+ if location.get('initctl', False):
+ self.svc_initctl = location['initctl']
+
+ def get_systemd_service_enabled(self):
+ def sysv_exists(name):
+ script = '/etc/init.d/' + name
+ return os.access(script, os.X_OK)
+
+ def sysv_is_enabled(name):
+ return bool(glob.glob('/etc/rc?.d/S??' + name))
+
+ service_name = self.__systemd_unit
+ (rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, service_name,))
+ if rc == 0:
+ return True
+ elif out.startswith('disabled'):
+ return False
+ elif sysv_exists(service_name):
+ return sysv_is_enabled(service_name)
+ else:
+ return False
+
+ def get_systemd_status_dict(self):
+
+ # Check status first as show will not fail if service does not exist
+ (rc, out, err) = self.execute_command("%s show '%s'" % (self.enable_cmd, self.__systemd_unit,))
+ if rc != 0:
+ self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err))
+ elif 'LoadState=not-found' in out:
+ self.module.fail_json(msg='systemd could not find the requested service "%r": %s' % (self.__systemd_unit, err))
+
+ key = None
+ value_buffer = []
+ status_dict = {}
+ for line in out.splitlines():
+ if '=' in line:
+ if not key:
+ key, value = line.split('=', 1)
+ # systemd fields that are shell commands can be multi-line
+ # We take a value that begins with a "{" as the start of
+ # a shell command and a line that ends with "}" as the end of
+ # the command
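+ # Illustrative (hypothetical) multi-line field as this parser expects it:
+ # ExecStart={ path=/usr/sbin/httpd ; argv[]=/usr/sbin/httpd
+ # -DFOREGROUND ; status=0/0 }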
+ if value.lstrip().startswith('{'):
+ if value.rstrip().endswith('}'):
+ status_dict[key] = value
+ key = None
+ else:
+ value_buffer.append(value)
+ else:
+ status_dict[key] = value
+ key = None
+ else:
+ if line.rstrip().endswith('}'):
+ status_dict[key] = '\n'.join(value_buffer)
+ key = None
+ else:
+ value_buffer.append(line)
+ else:
+ value_buffer.append(line)
+
+ return status_dict
+
+ def get_systemd_service_status(self):
+ d = self.get_systemd_status_dict()
+ if d.get('ActiveState') == 'active':
+ # run-once services (for which a single successful exit indicates
+ # that they are running as designed) should not be restarted here.
+ # Thus, we are not checking d['SubState'].
+ self.running = True
+ self.crashed = False
+ elif d.get('ActiveState') == 'failed':
+ self.running = False
+ self.crashed = True
+ elif d.get('ActiveState') is None:
+ self.module.fail_json(msg='No ActiveState value in systemctl show output for %r' % (self.__systemd_unit,))
+ else:
+ self.running = False
+ self.crashed = False
+ return self.running
+
+ def get_service_status(self):
+ if self.svc_cmd and self.svc_cmd.endswith('systemctl'):
+ return self.get_systemd_service_status()
+
+ self.action = "status"
+ rc, status_stdout, status_stderr = self.service_control()
+
+ # if we have decided the service is managed by upstart, we check for some additional output...
+ if self.svc_initctl and self.running is None:
+ # check the job status by upstart response
+ initctl_rc, initctl_status_stdout, initctl_status_stderr = self.execute_command("%s status %s %s" % (self.svc_initctl, self.name, self.arguments))
+ if "stop/waiting" in initctl_status_stdout:
+ self.running = False
+ elif "start/running" in initctl_status_stdout:
+ self.running = True
+
+ if self.svc_cmd and self.svc_cmd.endswith("rc-service") and self.running is None:
+ openrc_rc, openrc_status_stdout, openrc_status_stderr = self.execute_command("%s %s status" % (self.svc_cmd, self.name))
+ self.running = "started" in openrc_status_stdout
+ self.crashed = "crashed" in openrc_status_stderr
+
+ # Prefer a non-zero return code. For reference, see:
+ # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
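+ # For reference, the LSB spec above defines: 1 = dead with pid file,
+ # 2 = dead with lock file, 3 = not running, 4 = status unknown; 69 matches
+ # sysexits.h EX_UNAVAILABLE, which some init scripts use.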
+ if self.running is None and rc in [1, 2, 3, 4, 69]:
+ self.running = False
+
+ # if the job status is still not known check it by status output keywords
+ # Only check keywords if there's only one line of output (some init
+ # scripts will output verbosely in case of error and those can emit
+        # keywords that are picked up as false positives)
+ if self.running is None and status_stdout.count('\n') <= 1:
+ # first transform the status output that could irritate keyword matching
+ cleanout = status_stdout.lower().replace(self.name.lower(), '')
+ if "stop" in cleanout:
+ self.running = False
+ elif "run" in cleanout:
+ self.running = not ("not " in cleanout)
+ elif "start" in cleanout and "not " not in cleanout:
+ self.running = True
+ elif 'could not access pid file' in cleanout:
+ self.running = False
+ elif 'is dead and pid file exists' in cleanout:
+ self.running = False
+ elif 'dead but subsys locked' in cleanout:
+ self.running = False
+ elif 'dead but pid file exists' in cleanout:
+ self.running = False
+
+ # if the job status is still not known and we got a zero for the
+ # return code, assume here that the service is running
+ if self.running is None and rc == 0:
+ self.running = True
+
+ # if the job status is still not known check it by special conditions
+ if self.running is None:
+ if self.name == 'iptables' and "ACCEPT" in status_stdout:
+ # iptables status command output is lame
+ # TODO: lookup if we can use a return code for this instead?
+ self.running = True
+
+ return self.running
+
+ def service_enable(self):
+
+ if self.enable_cmd is None:
+            self.module.fail_json(msg='cannot detect command to enable service %s, possible typo or unknown init system' % self.name)
+
+ self.changed = True
+ action = None
+
+ #
+ # Upstart's initctl
+ #
+ if self.enable_cmd.endswith("initctl"):
+            def write_to_override_file(file_name, file_contents):
+                with open(file_name, 'w') as override_file:
+                    override_file.write(file_contents)
+
+ initpath = '/etc/init'
+ if self.upstart_version >= LooseVersion('0.6.7'):
+ manreg = re.compile(r'^manual\s*$', re.M | re.I)
+ config_line = 'manual\n'
+ else:
+ manreg = re.compile(r'^start on manual\s*$', re.M | re.I)
+ config_line = 'start on manual\n'
+ conf_file_name = "%s/%s.conf" % (initpath, self.name)
+ override_file_name = "%s/%s.override" % (initpath, self.name)
+
+ # Check to see if files contain the manual line in .conf and fail if True
+ with open(conf_file_name) as conf_file_fh:
+ conf_file_content = conf_file_fh.read()
+ if manreg.search(conf_file_content):
+ self.module.fail_json(msg="manual stanza not supported in a .conf file")
+
+ self.changed = False
+ if os.path.exists(override_file_name):
+ with open(override_file_name) as override_fh:
+ override_file_contents = override_fh.read()
+ # Remove manual stanza if present and service enabled
+ if self.enable and manreg.search(override_file_contents):
+ self.changed = True
+ override_state = manreg.sub('', override_file_contents)
+ # Add manual stanza if not present and service disabled
+ elif not (self.enable) and not (manreg.search(override_file_contents)):
+ self.changed = True
+ override_state = '\n'.join((override_file_contents, config_line))
+ # service already in desired state
+ else:
+ pass
+ # Add file with manual stanza if service disabled
+ elif not (self.enable):
+ self.changed = True
+ override_state = config_line
+ else:
+ # service already in desired state
+ pass
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=self.changed)
+
+ # The initctl method of enabling and disabling services is much
+ # different than for the other service methods. So actually
+ # committing the change is done in this conditional and then we
+ # skip the boilerplate at the bottom of the method
+ if self.changed:
+ try:
+ write_to_override_file(override_file_name, override_state)
+ except Exception:
+ self.module.fail_json(msg='Could not modify override file')
+
+ return
+
+ #
+ # SysV's chkconfig
+ #
+ if self.enable_cmd.endswith("chkconfig"):
+ if self.enable:
+ action = 'on'
+ else:
+ action = 'off'
+
+ (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
+ if 'chkconfig --add %s' % self.name in err:
+ self.execute_command("%s --add %s" % (self.enable_cmd, self.name))
+ (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
+ if self.name not in out:
+ self.module.fail_json(msg="service %s does not support chkconfig" % self.name)
+ # TODO: look back on why this is here
+ # state = out.split()[-1]
+
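+            # a typical `chkconfig --list <name>` row (illustrative):
+            #   sshd   0:off  1:off  2:on  3:on  4:on  5:on  6:off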
+ # Check if we're already in the correct state
+ if "3:%s" % action in out and "5:%s" % action in out:
+ self.changed = False
+ return
+
+ #
+ # Systemd's systemctl
+ #
+ if self.enable_cmd.endswith("systemctl"):
+ if self.enable:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ # Check if we're already in the correct state
+ service_enabled = self.get_systemd_service_enabled()
+
+ # self.changed should already be true
+ if self.enable == service_enabled:
+ self.changed = False
+ return
+
+ #
+ # OpenRC's rc-update
+ #
+ if self.enable_cmd.endswith("rc-update"):
+ if self.enable:
+ action = 'add'
+ else:
+ action = 'delete'
+
+ (rc, out, err) = self.execute_command("%s show" % self.enable_cmd)
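+            # `rc-update show` prints one service per line, e.g. (illustrative):
+            #   sshd | default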
+ for line in out.splitlines():
+ service_name, runlevels = line.split('|')
+ service_name = service_name.strip()
+ if service_name != self.name:
+ continue
+ runlevels = re.split(r'\s+', runlevels)
+ # service already enabled for the runlevel
+ if self.enable and self.runlevel in runlevels:
+ self.changed = False
+ # service already disabled for the runlevel
+ elif not self.enable and self.runlevel not in runlevels:
+ self.changed = False
+ break
+ else:
+ # service already disabled altogether
+ if not self.enable:
+ self.changed = False
+
+ if not self.changed:
+ return
+
+ #
+ # update-rc.d style
+ #
+ if self.enable_cmd.endswith("update-rc.d"):
+
+ enabled = False
+ slinks = glob.glob('/etc/rc?.d/S??' + self.name)
+ if slinks:
+ enabled = True
+
+ if self.enable != enabled:
+ self.changed = True
+
+ if self.enable:
+ action = 'enable'
+ klinks = glob.glob('/etc/rc?.d/K??' + self.name)
+ if not klinks:
+ if not self.module.check_mode:
+ (rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name))
+ if rc != 0:
+ if err:
+ self.module.fail_json(msg=err)
+ else:
+                                self.module.fail_json(msg=out)
+ else:
+ action = 'disable'
+
+ if not self.module.check_mode:
+ (rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action))
+ if rc != 0:
+ if err:
+ self.module.fail_json(msg=err)
+ else:
+                        self.module.fail_json(msg=out)
+ else:
+ self.changed = False
+
+ return
+
+ #
+ # insserv (Debian <=7, SLES, others)
+ #
+ if self.enable_cmd.endswith("insserv"):
+ if self.enable:
+ (rc, out, err) = self.execute_command("%s -n -v %s" % (self.enable_cmd, self.name))
+ else:
+ (rc, out, err) = self.execute_command("%s -n -r -v %s" % (self.enable_cmd, self.name))
+
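+            # the dry run (-n -v) reports intended changes on stderr, typically
+            # as lines containing 'enable service ...' or 'remove service ...';
+            # no such line means the service is already in the desired state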
+ self.changed = False
+ for line in err.splitlines():
+ if self.enable and line.find('enable service') != -1:
+ self.changed = True
+ break
+ if not self.enable and line.find('remove service') != -1:
+ self.changed = True
+ break
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=self.changed)
+
+ if not self.changed:
+ return
+
+ if self.enable:
+ (rc, out, err) = self.execute_command("%s %s" % (self.enable_cmd, self.name))
+ if (rc != 0) or (err != ''):
+ self.module.fail_json(msg=("Failed to install service. rc: %s, out: %s, err: %s" % (rc, out, err)))
+ return (rc, out, err)
+ else:
+ (rc, out, err) = self.execute_command("%s -r %s" % (self.enable_cmd, self.name))
+ if (rc != 0) or (err != ''):
+ self.module.fail_json(msg=("Failed to remove service. rc: %s, out: %s, err: %s" % (rc, out, err)))
+ return (rc, out, err)
+
+ #
+ # If we've gotten to the end, the service needs to be updated
+ #
+ self.changed = True
+
+ # we change argument order depending on real binary used:
+ # rc-update and systemctl need the argument order reversed
+
+ if self.enable_cmd.endswith("rc-update"):
+ args = (self.enable_cmd, action, self.name + " " + self.runlevel)
+ elif self.enable_cmd.endswith("systemctl"):
+ args = (self.enable_cmd, action, self.__systemd_unit)
+ else:
+ args = (self.enable_cmd, self.name, action)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=self.changed)
+
+ (rc, out, err) = self.execute_command("%s %s %s" % args)
+ if rc != 0:
+ if err:
+ self.module.fail_json(msg="Error when trying to %s %s: rc=%s %s" % (action, self.name, rc, err))
+ else:
+ self.module.fail_json(msg="Failure for %s %s: rc=%s %s" % (action, self.name, rc, out))
+
+ return (rc, out, err)
+
+ def service_control(self):
+
+ # Decide what command to run
+ svc_cmd = ''
+ arguments = self.arguments
+ if self.svc_cmd:
+ if not self.svc_cmd.endswith("systemctl"):
+ if self.svc_cmd.endswith("initctl"):
+ # initctl commands take the form <cmd> <action> <name>
+ svc_cmd = self.svc_cmd
+ arguments = "%s %s" % (self.name, arguments)
+ else:
+ # SysV and OpenRC take the form <cmd> <name> <action>
+ svc_cmd = "%s %s" % (self.svc_cmd, self.name)
+ else:
+ # systemd commands take the form <cmd> <action> <name>
+ svc_cmd = self.svc_cmd
+ arguments = "%s %s" % (self.__systemd_unit, arguments)
+ elif self.svc_cmd is None and self.svc_initscript:
+ # upstart
+ svc_cmd = "%s" % self.svc_initscript
+
+ # In OpenRC, if a service crashed, we need to reset its status to
+ # stopped with the zap command, before we can start it back.
+ if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed:
+ self.execute_command("%s zap" % svc_cmd, daemonize=True)
+
+ if self.action != "restart":
+ if svc_cmd != '':
+ # upstart or systemd or OpenRC
+ rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
+ else:
+ # SysV
+ rc_state, stdout, stderr = self.execute_command("%s %s %s" % (self.action, self.name, arguments), daemonize=True)
+ elif self.svc_cmd and self.svc_cmd.endswith('rc-service'):
+ # All services in OpenRC support restart.
+ rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
+ else:
+ # In other systems, not all services support restart. Do it the hard way.
+ if svc_cmd != '':
+ # upstart or systemd
+ rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % (svc_cmd, 'stop', arguments), daemonize=True)
+ else:
+ # SysV
+ rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % ('stop', self.name, arguments), daemonize=True)
+
+ if self.sleep:
+ time.sleep(self.sleep)
+
+ if svc_cmd != '':
+ # upstart or systemd
+ rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % (svc_cmd, 'start', arguments), daemonize=True)
+ else:
+ # SysV
+ rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % ('start', self.name, arguments), daemonize=True)
+
+ # merge return information
+ if rc1 != 0 and rc2 == 0:
+ rc_state = rc2
+ stdout = stdout2
+ stderr = stderr2
+ else:
+ rc_state = rc1 + rc2
+ stdout = stdout1 + stdout2
+ stderr = stderr1 + stderr2
+
+ return (rc_state, stdout, stderr)
+
+
+class FreeBsdService(Service):
+ """
+ This is the FreeBSD Service manipulation class - it uses the /etc/rc.conf
+ file for controlling services started at boot and the 'service' binary to
+ check status and perform direct service manipulation.
+ """
+
+ platform = 'FreeBSD'
+ distribution = None
+
+ def get_service_tools(self):
+ self.svc_cmd = self.module.get_bin_path('service', True)
+ if not self.svc_cmd:
+ self.module.fail_json(msg='unable to find service binary')
+
+ self.sysrc_cmd = self.module.get_bin_path('sysrc')
+
+ def get_service_status(self):
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'onestatus', self.arguments))
+ if self.name == "pf":
+ self.running = "Enabled" in stdout
+ else:
+ if rc == 1:
+ self.running = False
+ elif rc == 0:
+ self.running = True
+
+ def service_enable(self):
+ if self.enable:
+ self.rcconf_value = "YES"
+ else:
+ self.rcconf_value = "NO"
+
+ rcfiles = ['/etc/rc.conf', '/etc/rc.conf.local', '/usr/local/etc/rc.conf']
+ for rcfile in rcfiles:
+ if os.path.isfile(rcfile):
+ self.rcconf_file = rcfile
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments))
+        rcvars = None
+        try:
+            rcvars = shlex.split(stdout, comments=True)
+        except Exception:
+            # TODO: add a warning to the output with the failure
+            pass
+
+ if not rcvars:
+ self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
+
+ # In rare cases, i.e. sendmail, rcvar can return several key=value pairs
+ # Usually there is just one, however. In other rare cases, i.e. uwsgi,
+ # rcvar can return extra uncommented data that is not at all related to
+ # the rcvar. We will just take the first key=value pair we come across
+ # and hope for the best.
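+        # e.g. (illustrative) `service sshd rcvar` typically prints:
+        #   # sshd
+        #   sshd_enable="YES"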
+ for rcvar in rcvars:
+ if '=' in rcvar:
+ self.rcconf_key, default_rcconf_value = rcvar.split('=', 1)
+ break
+
+ if self.rcconf_key is None:
+ self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
+
+ if self.sysrc_cmd: # FreeBSD >= 9.2
+
+ rc, current_rcconf_value, stderr = self.execute_command("%s -n %s" % (self.sysrc_cmd, self.rcconf_key))
+ # it can happen that rcvar is not set (case of a system coming from the ports collection)
+ # so we will fallback on the default
+ if rc != 0:
+ current_rcconf_value = default_rcconf_value
+
+ if current_rcconf_value.strip().upper() != self.rcconf_value:
+
+ self.changed = True
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg="changing service enablement")
+
+ rc, change_stdout, change_stderr = self.execute_command("%s %s=\"%s\"" % (self.sysrc_cmd, self.rcconf_key, self.rcconf_value))
+ if rc != 0:
+ self.module.fail_json(msg="unable to set rcvar using sysrc", stdout=change_stdout, stderr=change_stderr)
+
+ # sysrc does not exit with code 1 on permission error => validate successful change using service(8)
+ rc, check_stdout, check_stderr = self.execute_command("%s %s %s" % (self.svc_cmd, self.name, "enabled"))
+ if self.enable != (rc == 0): # rc = 0 indicates enabled service, rc = 1 indicates disabled service
+ self.module.fail_json(msg="unable to set rcvar: sysrc did not change value", stdout=change_stdout, stderr=change_stderr)
+
+ else:
+ self.changed = False
+
+ else: # Legacy (FreeBSD < 9.2)
+ try:
+ return self.service_enable_rcconf()
+ except Exception:
+ self.module.fail_json(msg='unable to set rcvar')
+
+ def service_control(self):
+
+ if self.action == "start":
+ self.action = "onestart"
+ if self.action == "stop":
+ self.action = "onestop"
+ if self.action == "reload":
+ self.action = "onereload"
+
+ ret = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments))
+
+ if self.sleep:
+ time.sleep(self.sleep)
+
+ return ret
+
+
+class DragonFlyBsdService(FreeBsdService):
+ """
+ This is the DragonFly BSD Service manipulation class - it uses the /etc/rc.conf
+ file for controlling services started at boot and the 'service' binary to
+ check status and perform direct service manipulation.
+ """
+
+ platform = 'DragonFly'
+ distribution = None
+
+ def service_enable(self):
+ if self.enable:
+ self.rcconf_value = "YES"
+ else:
+ self.rcconf_value = "NO"
+
+ rcfiles = ['/etc/rc.conf'] # Overkill?
+ for rcfile in rcfiles:
+ if os.path.isfile(rcfile):
+ self.rcconf_file = rcfile
+
+ self.rcconf_key = "%s" % self.name.replace("-", "_")
+
+ return self.service_enable_rcconf()
+
+
+class OpenBsdService(Service):
+ """
+ This is the OpenBSD Service manipulation class - it uses rcctl(8) or
+ /etc/rc.d scripts for service control. Enabling a service is
+ only supported if rcctl is present.
+ """
+
+ platform = 'OpenBSD'
+ distribution = None
+
+ def get_service_tools(self):
+ self.enable_cmd = self.module.get_bin_path('rcctl')
+
+ if self.enable_cmd:
+ self.svc_cmd = self.enable_cmd
+ else:
+ rcdir = '/etc/rc.d'
+
+ rc_script = "%s/%s" % (rcdir, self.name)
+ if os.path.isfile(rc_script):
+ self.svc_cmd = rc_script
+
+ if not self.svc_cmd:
+ self.module.fail_json(msg='unable to find svc_cmd')
+
+ def get_service_status(self):
+ if self.enable_cmd:
+ rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svc_cmd, 'check', self.name))
+ else:
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check'))
+
+ if stderr:
+ self.module.fail_json(msg=stderr)
+
+ if rc == 1:
+ self.running = False
+ elif rc == 0:
+ self.running = True
+
+ def service_control(self):
+ if self.enable_cmd:
+ return self.execute_command("%s -f %s %s" % (self.svc_cmd, self.action, self.name), daemonize=True)
+ else:
+ return self.execute_command("%s -f %s" % (self.svc_cmd, self.action))
+
+ def service_enable(self):
+ if not self.enable_cmd:
+ return super(OpenBsdService, self).service_enable()
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'getdef', self.name, 'flags'))
+
+ if stderr:
+ self.module.fail_json(msg=stderr)
+
+ getdef_string = stdout.rstrip()
+
+ # Depending on the service the string returned from 'getdef' may be
+ # either a set of flags or the boolean YES/NO
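+        # e.g. a daemon with default flags prints them verbatim, while one
+        # without any prints the bare boolean YES or NO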
+ if getdef_string == "YES" or getdef_string == "NO":
+ default_flags = ''
+ else:
+ default_flags = getdef_string
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'flags'))
+
+ if stderr:
+ self.module.fail_json(msg=stderr)
+
+ get_string = stdout.rstrip()
+
+ # Depending on the service the string returned from 'get' may be
+ # either a set of flags or the boolean YES/NO
+ if get_string == "YES" or get_string == "NO":
+ current_flags = ''
+ else:
+ current_flags = get_string
+
+ # If there are arguments from the user we use these as flags unless
+ # they are already set.
+ if self.arguments and self.arguments != current_flags:
+ changed_flags = self.arguments
+ # If the user has not supplied any arguments and the current flags
+ # differ from the default we reset them.
+ elif not self.arguments and current_flags != default_flags:
+ changed_flags = ' '
+ # Otherwise there is no need to modify flags.
+ else:
+ changed_flags = ''
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'status'))
+
+ if self.enable:
+ if rc == 0 and not changed_flags:
+ return
+
+ if rc != 0:
+ status_action = "set %s status on" % (self.name)
+ else:
+ status_action = ''
+ if changed_flags:
+ flags_action = "set %s flags %s" % (self.name, changed_flags)
+ else:
+ flags_action = ''
+ else:
+ if rc == 1:
+ return
+
+ status_action = "set %s status off" % self.name
+ flags_action = ''
+
+ # Verify state assumption
+ if not status_action and not flags_action:
+            self.module.fail_json(msg="neither status_action nor flags_action is set, this should never happen")
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg="changing service enablement")
+
+ status_modified = 0
+ if status_action:
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, status_action))
+
+ if rc != 0:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg="rcctl failed to modify service status")
+
+ status_modified = 1
+
+ if flags_action:
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, flags_action))
+
+ if rc != 0:
+ if stderr:
+ if status_modified:
+ error_message = "rcctl modified service status but failed to set flags: " + stderr
+ else:
+ error_message = stderr
+ else:
+ if status_modified:
+ error_message = "rcctl modified service status but failed to set flags"
+ else:
+ error_message = "rcctl failed to modify service flags"
+
+ self.module.fail_json(msg=error_message)
+
+ self.changed = True
+
+
+class NetBsdService(Service):
+ """
+ This is the NetBSD Service manipulation class - it uses the /etc/rc.conf
+ file for controlling services started at boot, check status and perform
+ direct service manipulation. Init scripts in /etc/rc.d are used for
+ controlling services (start/stop) as well as for controlling the current
+ state.
+ """
+
+ platform = 'NetBSD'
+ distribution = None
+
+ def get_service_tools(self):
+        initpaths = ['/etc/rc.d']  # better: $rc_directories - to inspect it, run: sh -c '. /etc/rc.conf ; echo $rc_directories'
+
+ for initdir in initpaths:
+ initscript = "%s/%s" % (initdir, self.name)
+ if os.path.isfile(initscript):
+ self.svc_initscript = initscript
+
+ if not self.svc_initscript:
+ self.module.fail_json(msg='unable to find rc.d script')
+
+ def service_enable(self):
+ if self.enable:
+ self.rcconf_value = "YES"
+ else:
+ self.rcconf_value = "NO"
+
+ rcfiles = ['/etc/rc.conf'] # Overkill?
+ for rcfile in rcfiles:
+ if os.path.isfile(rcfile):
+ self.rcconf_file = rcfile
+
+ self.rcconf_key = "%s" % self.name.replace("-", "_")
+
+ return self.service_enable_rcconf()
+
+ def get_service_status(self):
+ self.svc_cmd = "%s" % self.svc_initscript
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'onestatus'))
+ if rc == 1:
+ self.running = False
+ elif rc == 0:
+ self.running = True
+
+ def service_control(self):
+ if self.action == "start":
+ self.action = "onestart"
+ if self.action == "stop":
+ self.action = "onestop"
+
+ self.svc_cmd = "%s" % self.svc_initscript
+ return self.execute_command("%s %s" % (self.svc_cmd, self.action), daemonize=True)
+
+
+class SunOSService(Service):
+ """
+ This is the SunOS Service manipulation class - it uses the svcadm
+ command for controlling services, and svcs command for checking status.
+ It also tries to be smart about taking the service out of maintenance
+ state if necessary.
+ """
+ platform = 'SunOS'
+ distribution = None
+
+ def get_service_tools(self):
+ self.svcs_cmd = self.module.get_bin_path('svcs', True)
+
+ if not self.svcs_cmd:
+ self.module.fail_json(msg='unable to find svcs binary')
+
+ self.svcadm_cmd = self.module.get_bin_path('svcadm', True)
+
+ if not self.svcadm_cmd:
+ self.module.fail_json(msg='unable to find svcadm binary')
+
+ if self.svcadm_supports_sync():
+ self.svcadm_sync = '-s'
+ else:
+ self.svcadm_sync = ''
+
+    def svcadm_supports_sync(self):
+        # Synchronous restart/refresh (-s) is only supported on
+        # Oracle Solaris >= 11.2
+        with open('/etc/release', 'r') as release_fh:
+            for line in release_fh:
+                m = re.match(r'\s+Oracle Solaris (\d+)\.(\d+).*', line.rstrip())
+                # compare version components numerically; comparing the string
+                # tuple would misorder e.g. 11.10 against 11.2
+                if m and tuple(int(x) for x in m.groups()) >= (11, 2):
+                    return True
+
+ def get_service_status(self):
+ status = self.get_sunos_svcs_status()
+ # Only 'online' is considered properly running. Everything else is off
+ # or has some sort of problem.
+ if status == 'online':
+ self.running = True
+ else:
+ self.running = False
+
+ def get_sunos_svcs_status(self):
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.svcs_cmd, self.name))
+ if rc == 1:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+
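+        # `svcs <name>` prints a header plus one row per instance, e.g.
+        # (illustrative):
+        #   STATE          STIME    FMRI
+        #   online         12:34:56 svc:/network/ssh:default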
+ lines = stdout.rstrip("\n").split("\n")
+ status = lines[-1].split(" ")[0]
+ # status is one of: online, offline, degraded, disabled, maintenance, uninitialized
+ # see man svcs(1)
+ return status
+
+ def service_enable(self):
+ # Get current service enablement status
+ rc, stdout, stderr = self.execute_command("%s -l %s" % (self.svcs_cmd, self.name))
+
+ if rc != 0:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+
+ enabled = False
+ temporary = False
+
+ # look for enabled line, which could be one of:
+ # enabled true (temporary)
+ # enabled false (temporary)
+ # enabled true
+ # enabled false
+ for line in stdout.split("\n"):
+ if line.startswith("enabled"):
+ if "true" in line:
+ enabled = True
+ if "temporary" in line:
+ temporary = True
+
+ startup_enabled = (enabled and not temporary) or (not enabled and temporary)
+
+ if self.enable and startup_enabled:
+ return
+ elif (not self.enable) and (not startup_enabled):
+ return
+
+ if not self.module.check_mode:
+ # Mark service as started or stopped (this will have the side effect of
+ # actually stopping or starting the service)
+ if self.enable:
+ subcmd = "enable -rs"
+ else:
+ subcmd = "disable -s"
+
+ rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
+
+ if rc != 0:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+
+ self.changed = True
+
+ def service_control(self):
+ status = self.get_sunos_svcs_status()
+
+ # if starting or reloading, clear maintenance states
+ if self.action in ['start', 'reload', 'restart'] and status in ['maintenance', 'degraded']:
+ rc, stdout, stderr = self.execute_command("%s clear %s" % (self.svcadm_cmd, self.name))
+ if rc != 0:
+ return rc, stdout, stderr
+ status = self.get_sunos_svcs_status()
+
+ if status in ['maintenance', 'degraded']:
+ self.module.fail_json(msg="Failed to bring service out of %s status." % status)
+
+ if self.action == 'start':
+ subcmd = "enable -rst"
+ elif self.action == 'stop':
+ subcmd = "disable -st"
+ elif self.action == 'reload':
+ subcmd = "refresh %s" % (self.svcadm_sync)
+ elif self.action == 'restart' and status == 'online':
+ subcmd = "restart %s" % (self.svcadm_sync)
+ elif self.action == 'restart' and status != 'online':
+ subcmd = "enable -rst"
+
+ return self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
+
+
+class AIX(Service):
+ """
+ This is the AIX Service (SRC) manipulation class - it uses lssrc, startsrc, stopsrc
+ and refresh for service control. Enabling a service is currently not supported.
+ Would require to add an entry in the /etc/inittab file (mkitab, chitab and rmitab
+ commands)
+ """
+
+ platform = 'AIX'
+ distribution = None
+
+ def get_service_tools(self):
+ self.lssrc_cmd = self.module.get_bin_path('lssrc', True)
+
+ if not self.lssrc_cmd:
+ self.module.fail_json(msg='unable to find lssrc binary')
+
+ self.startsrc_cmd = self.module.get_bin_path('startsrc', True)
+
+ if not self.startsrc_cmd:
+ self.module.fail_json(msg='unable to find startsrc binary')
+
+ self.stopsrc_cmd = self.module.get_bin_path('stopsrc', True)
+
+ if not self.stopsrc_cmd:
+ self.module.fail_json(msg='unable to find stopsrc binary')
+
+ self.refresh_cmd = self.module.get_bin_path('refresh', True)
+
+ if not self.refresh_cmd:
+ self.module.fail_json(msg='unable to find refresh binary')
+
+ def get_service_status(self):
+ status = self.get_aix_src_status()
+ # Only 'active' is considered properly running. Everything else is off
+ # or has some sort of problem.
+ if status == 'active':
+ self.running = True
+ else:
+ self.running = False
+
+ def get_aix_src_status(self):
+ # Check subsystem status
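+        # `lssrc -s <name>` prints a header plus a status row, e.g. (illustrative):
+        #   Subsystem         Group            PID          Status
+        #   sshd              ssh              123456       active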
+ rc, stdout, stderr = self.execute_command("%s -s %s" % (self.lssrc_cmd, self.name))
+ if rc == 1:
+ # If check for subsystem is not ok, check if service name is a
+ # group subsystem
+ rc, stdout, stderr = self.execute_command("%s -g %s" % (self.lssrc_cmd, self.name))
+ if rc == 1:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+ else:
+ # Check all subsystem status, if one subsystem is not active
+ # the group is considered not active.
+ lines = stdout.splitlines()
+ for state in lines[1:]:
+ if state.split()[-1].strip() != "active":
+ status = state.split()[-1].strip()
+ break
+ else:
+ status = "active"
+
+ # status is one of: active, inoperative
+ return status
+ else:
+ lines = stdout.rstrip("\n").split("\n")
+ status = lines[-1].split(" ")[-1]
+
+ # status is one of: active, inoperative
+ return status
+
+ def service_control(self):
+
+ # Check if service name is a subsystem of a group subsystem
+ rc, stdout, stderr = self.execute_command("%s -a" % (self.lssrc_cmd))
+ if rc == 1:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+ else:
+ lines = stdout.splitlines()
+ subsystems = []
+ groups = []
+ for line in lines[1:]:
+ subsystem = line.split()[0].strip()
+ group = line.split()[1].strip()
+ subsystems.append(subsystem)
+ if group:
+ groups.append(group)
+
+            # Decide which flag identifies the service name:
+            # -s for a subsystem, -g for a group subsystem.
+            # Default to -s so the variable is always bound below.
+            srccmd_parameter = "-s"
+            if self.name in subsystems:
+                srccmd_parameter = "-s"
+            elif self.name in groups:
+                srccmd_parameter = "-g"
+
+ if self.action == 'start':
+ srccmd = self.startsrc_cmd
+ elif self.action == 'stop':
+ srccmd = self.stopsrc_cmd
+ elif self.action == 'reload':
+ srccmd = self.refresh_cmd
+ elif self.action == 'restart':
+ self.execute_command("%s %s %s" % (self.stopsrc_cmd, srccmd_parameter, self.name))
+ if self.sleep:
+ time.sleep(self.sleep)
+ srccmd = self.startsrc_cmd
+
+ if self.arguments and self.action in ('start', 'restart'):
+ return self.execute_command("%s -a \"%s\" %s %s" % (srccmd, self.arguments, srccmd_parameter, self.name))
+ else:
+ return self.execute_command("%s %s %s" % (srccmd, srccmd_parameter, self.name))
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['started', 'stopped', 'reloaded', 'restarted']),
+ sleep=dict(type='int'),
+ pattern=dict(type='str'),
+ enabled=dict(type='bool'),
+ runlevel=dict(type='str', default='default'),
+ arguments=dict(type='str', default='', aliases=['args']),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+ service = Service(module)
+
+ module.debug('Service instantiated - platform %s' % service.platform)
+ if service.distribution:
+ module.debug('Service instantiated - distribution %s' % service.distribution)
+
+ rc = 0
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = service.name
+
+ # Find service management tools
+ service.get_service_tools()
+
+ # Enable/disable service startup at boot if requested
+ if service.module.params['enabled'] is not None:
+ # FIXME: ideally this should detect if we need to toggle the enablement state, though
+ # it's unlikely the changed handler would need to fire in this case so it's a minor thing.
+ service.service_enable()
+ result['enabled'] = service.enable
+
+ if module.params['state'] is None:
+ # Not changing the running state, so bail out now.
+ result['changed'] = service.changed
+ module.exit_json(**result)
+
+ result['state'] = service.state
+
+ # Collect service status
+ if service.pattern:
+ service.check_ps()
+ else:
+ service.get_service_status()
+
+ # Calculate if request will change service state
+ service.check_service_changed()
+
+ # Modify service state if necessary
+ (rc, out, err) = service.modify_service_state()
+
+ if rc != 0:
+ if err and "Job is already running" in err:
+ # upstart got confused, one such possibility is MySQL on Ubuntu 12.04
+ # where status may report it has no start/stop links and we could
+ # not get accurate status
+ pass
+ else:
+ if err:
+ module.fail_json(msg=err)
+ else:
+ module.fail_json(msg=out)
+
+ result['changed'] = service.changed | service.svc_change
+ if service.module.params['enabled'] is not None:
+ result['enabled'] = service.module.params['enabled']
+
+ if not service.module.params['state']:
+ status = service.get_service_status()
+ if status is None:
+ result['state'] = 'absent'
+        elif status is False:
+            result['state'] = 'stopped'
+        else:
+            result['state'] = 'started'
+ else:
+ # as we may have just bounced the service the service command may not
+ # report accurate state at this moment so just show what we ran
+ if service.module.params['state'] in ['reloaded', 'restarted', 'started']:
+ result['state'] = 'started'
+ else:
+ result['state'] = 'stopped'
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/service_facts.py b/lib/ansible/modules/service_facts.py
new file mode 100644
index 0000000..d2fbfad
--- /dev/null
+++ b/lib/ansible/modules/service_facts.py
@@ -0,0 +1,411 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# originally copied from AWX's scan_services module to bring this functionality
+# into Core
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: service_facts
+short_description: Return service state information as fact data
+description:
+ - Return service state information as fact data for various service management utilities.
+version_added: "2.5"
+requirements: ["Any of the following supported init systems: systemd, sysv, upstart, openrc, AIX SRC"]
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.facts
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ facts:
+ support: full
+ platform:
+ platforms: posix
+notes:
+ - When accessing the C(ansible_facts.services) facts collected by this module,
+ it is recommended to not use "dot notation" because services can have a C(-)
+ character in their name which would result in invalid "dot notation", such as
+    C(ansible_facts.services.zuul-gateway). Instead, use the string value of the
+    service name as the key in order to obtain the fact data value, as in
+    C(ansible_facts.services['zuul-gateway']).
+ - AIX SRC was added in version 2.11.
+author:
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = r'''
+- name: Populate service facts
+ ansible.builtin.service_facts:
+
+- name: Print service facts
+ ansible.builtin.debug:
+ var: ansible_facts.services
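+
+# A hypothetical follow-up: look up a single service with bracket notation,
+# since service names may contain '-' and break dot notation.
+- name: Print the state of the ssh service, if present
+  ansible.builtin.debug:
+    var: ansible_facts.services['ssh.service']
+  when: "'ssh.service' in ansible_facts.services"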
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Facts to add to ansible_facts about the services on the system
+ returned: always
+ type: complex
+ contains:
+ services:
+ description: States of the services with service name as key.
+ returned: always
+ type: complex
+ contains:
+ source:
+ description:
+ - Init system of the service.
+ - One of C(rcctl), C(systemd), C(sysv), C(upstart), C(src).
+ returned: always
+ type: str
+ sample: sysv
+ state:
+ description:
+ - State of the service.
+ - 'This commonly includes (but is not limited to) the following: C(failed), C(running), C(stopped) or C(unknown).'
+ - Depending on the used init system additional states might be returned.
+ returned: always
+ type: str
+ sample: running
+ status:
+ description:
+ - State of the service.
+ - Either C(enabled), C(disabled), C(static), C(indirect) or C(unknown).
+ returned: systemd systems or RedHat/SUSE flavored sysvinit/upstart or OpenBSD
+ type: str
+ sample: enabled
+ name:
+ description: Name of the service.
+ returned: always
+ type: str
+ sample: arp-ethers.service
+'''
+
+
+import os
+import platform
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+
+
+class BaseService(object):
+
+ def __init__(self, module):
+ self.module = module
+
+
+class ServiceScanService(BaseService):
+
+ def _list_sysvinit(self, services):
+ rc, stdout, stderr = self.module.run_command("%s --status-all" % self.service_path)
+ if rc == 4 and not os.path.exists('/etc/init.d'):
+            # This function is not intended to run on Red Hat, but it could happen
+            # if `chkconfig` is not installed. `service` on RHEL9 returns rc 4
+            # when /etc/init.d is missing, so check for /etc/init.d as an extra
+            # guard instead of relying solely on rc == 4.
+ return
+ if rc != 0:
+ self.module.warn("Unable to query 'service' tool (%s): %s" % (rc, stderr))
+ p = re.compile(r'^\s*\[ (?P<state>\+|\-) \]\s+(?P<name>.+)$', flags=re.M)
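+        # matches Debian-style `service --status-all` rows, e.g. (illustrative):
+        #   [ + ]  ssh
+        #   [ - ]  rsync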
+ for match in p.finditer(stdout):
+ service_name = match.group('name')
+ if match.group('state') == "+":
+ service_state = "running"
+ else:
+ service_state = "stopped"
+ services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
+
+ def _list_upstart(self, services):
+ p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
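+        # matches `initctl list` rows, e.g. (illustrative):
+        #   ssh start/running, process 1234
+        #   rc stop/waiting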
+ rc, stdout, stderr = self.module.run_command("%s list" % self.initctl_path)
+ if rc != 0:
+ self.module.warn('Unable to query upstart for service data: %s' % stderr)
+ else:
+ real_stdout = stdout.replace("\r", "")
+ for line in real_stdout.split("\n"):
+ m = p.match(line)
+ if not m:
+ continue
+ service_name = m.group('name')
+ service_goal = m.group('goal')
+ service_state = m.group('state')
+ if m.group('pid'):
+ pid = m.group('pid')
+ else:
+ pid = None # NOQA
+ payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
+ services[service_name] = payload
+
+ def _list_rh(self, services):
+
+ p = re.compile(
+ r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
+ r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
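+        # matches full runlevel rows from `chkconfig`, e.g. (illustrative):
+        #   sshd   0:off  1:off  2:on  3:on  4:on  5:on  6:off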
+ rc, stdout, stderr = self.module.run_command('%s' % self.chkconfig_path, use_unsafe_shell=True)
+ # Check for special cases where stdout does not fit pattern
+ match_any = False
+ for line in stdout.split('\n'):
+ if p.match(line):
+ match_any = True
+ if not match_any:
+ p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
+ match_any = False
+ for line in stdout.split('\n'):
+ if p_simple.match(line):
+ match_any = True
+ if match_any:
+ # Try extra flags " -l --allservices" needed for SLES11
+ rc, stdout, stderr = self.module.run_command('%s -l --allservices' % self.chkconfig_path, use_unsafe_shell=True)
+ elif '--list' in stderr:
+ # Extra flag needed for RHEL5
+ rc, stdout, stderr = self.module.run_command('%s --list' % self.chkconfig_path, use_unsafe_shell=True)
+
+ for line in stdout.split('\n'):
+ m = p.match(line)
+ if m:
+ service_name = m.group('service')
+ service_state = 'stopped'
+ service_status = "disabled"
+ if m.group('rl3') == 'on':
+ service_status = "enabled"
+ rc, stdout, stderr = self.module.run_command('%s %s status' % (self.service_path, service_name), use_unsafe_shell=True)
+ service_state = rc
+ if rc in (0,):
+ service_state = 'running'
+ # elif rc in (1,3):
+ else:
+ output = stderr.lower()
+ for x in ('root', 'permission', 'not in sudoers'):
+ if x in output:
+ self.module.warn('Insufficient permissions to query sysV service "%s" and their states' % service_name)
+ break
+ else:
+ service_state = 'stopped'
+
+ service_data = {"name": service_name, "state": service_state, "status": service_status, "source": "sysv"}
+ services[service_name] = service_data
+
+ def _list_openrc(self, services):
+ all_services_runlevels = {}
+ rc, stdout, stderr = self.module.run_command("%s -a -s -m 2>&1 | grep '^ ' | tr -d '[]'" % self.rc_status_path, use_unsafe_shell=True)
+ rc_u, stdout_u, stderr_u = self.module.run_command("%s show -v 2>&1 | grep '|'" % self.rc_update_path, use_unsafe_shell=True)
+ for line in stdout_u.split('\n'):
+ line_data = line.split('|')
+ if len(line_data) < 2:
+ continue
+ service_name = line_data[0].strip()
+ runlevels = line_data[1].strip()
+ if not runlevels:
+ all_services_runlevels[service_name] = None
+ else:
+ all_services_runlevels[service_name] = runlevels.split()
+ for line in stdout.split('\n'):
+ line_data = line.split()
+ if len(line_data) < 2:
+ continue
+ service_name = line_data[0]
+ service_state = line_data[1]
+            service_runlevels = all_services_runlevels.get(service_name)  # avoid KeyError if rc-update did not list it
+ service_data = {"name": service_name, "runlevels": service_runlevels, "state": service_state, "source": "openrc"}
+ services[service_name] = service_data
+
+ def gather_services(self):
+ services = {}
+
+ # find cli tools if available
+ self.service_path = self.module.get_bin_path("service")
+ self.chkconfig_path = self.module.get_bin_path("chkconfig")
+ self.initctl_path = self.module.get_bin_path("initctl")
+ self.rc_status_path = self.module.get_bin_path("rc-status")
+ self.rc_update_path = self.module.get_bin_path("rc-update")
+
+ # TODO: review conditionals ... they should not be this 'exclusive'
+ if self.service_path and self.chkconfig_path is None and self.rc_status_path is None:
+ self._list_sysvinit(services)
+ if self.initctl_path and self.chkconfig_path is None:
+ self._list_upstart(services)
+ elif self.chkconfig_path:
+ self._list_rh(services)
+ elif self.rc_status_path is not None and self.rc_update_path is not None:
+ self._list_openrc(services)
+ return services
+
+
+class SystemctlScanService(BaseService):
+
+ BAD_STATES = frozenset(['not-found', 'masked', 'failed'])
+
+ def systemd_enabled(self):
+ # Check if init is the systemd command, using comm as cmdline could be symlink
+        try:
+            with open('/proc/1/comm', 'r') as f:
+                for line in f:
+                    if 'systemd' in line:
+                        return True
+        except IOError:
+            # If comm doesn't exist, old kernel, no systemd
+            return False
+        return False
+
+ def _list_from_units(self, systemctl_path, services):
+
+ # list units as systemd sees them
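+        # typical rows (illustrative); failed or not-found units carry a leading
+        # marker character that shifts every field right by one:
+        #   sshd.service   loaded    active   running OpenSSH server daemon
+        #   * foo.service  not-found inactive dead    foo.service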
+ rc, stdout, stderr = self.module.run_command("%s list-units --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
+ if rc != 0:
+ self.module.warn("Could not list units from systemd: %s" % stderr)
+ else:
+ for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line]:
+
+ state_val = "stopped"
+ status_val = "unknown"
+ fields = line.split()
+ for bad in self.BAD_STATES:
+                    if bad in fields:  # a leading status marker, if any, occupies field 0
+ status_val = bad
+ fields = fields[1:]
+ break
+ else:
+ # active/inactive
+ status_val = fields[2]
+
+                # the fields list is now normalized, so indexing is predictable
+ service_name = fields[0]
+ if fields[3] == "running":
+ state_val = "running"
+
+ services[service_name] = {"name": service_name, "state": state_val, "status": status_val, "source": "systemd"}
+
+ def _list_from_unit_files(self, systemctl_path, services):
+
+ # now try unit files for complete picture and final 'status'
+ rc, stdout, stderr = self.module.run_command("%s list-unit-files --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
+ if rc != 0:
+ self.module.warn("Could not get unit files data from systemd: %s" % stderr)
+ else:
+ for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line]:
+ # there is one more column (VENDOR PRESET) from `systemctl list-unit-files` for systemd >= 245
+ try:
+ service_name, status_val = line.split()[:2]
+            except ValueError:  # fewer than two columns to unpack
+ self.module.fail_json(msg="Malformed output discovered from systemd list-unit-files: {0}".format(line))
+ if service_name not in services:
+ rc, stdout, stderr = self.module.run_command("%s show %s --property=ActiveState" % (systemctl_path, service_name), use_unsafe_shell=True)
+ state = 'unknown'
+ if not rc and stdout != '':
+ state = stdout.replace('ActiveState=', '').rstrip()
+ services[service_name] = {"name": service_name, "state": state, "status": status_val, "source": "systemd"}
+ elif services[service_name]["status"] not in self.BAD_STATES:
+ services[service_name]["status"] = status_val
+
+ def gather_services(self):
+
+ services = {}
+ if self.systemd_enabled():
+ systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
+ if systemctl_path:
+ self._list_from_units(systemctl_path, services)
+ self._list_from_unit_files(systemctl_path, services)
+
+ return services
+
+
+class AIXScanService(BaseService):
+
+ def gather_services(self):
+
+ services = {}
+ if platform.system() == 'AIX':
+ lssrc_path = self.module.get_bin_path("lssrc")
+ if lssrc_path:
+ rc, stdout, stderr = self.module.run_command("%s -a" % lssrc_path)
+ if rc != 0:
+ self.module.warn("lssrc could not retrieve service data (%s): %s" % (rc, stderr))
+ else:
+ for line in stdout.split('\n'):
+ line_data = line.split()
+ if len(line_data) < 2:
+ continue # Skipping because we expected more data
+ if line_data[0] == "Subsystem":
+ continue # Skip header
+ service_name = line_data[0]
+ if line_data[-1] == "active":
+ service_state = "running"
+ elif line_data[-1] == "inoperative":
+ service_state = "stopped"
+ else:
+ service_state = "unknown"
+ services[service_name] = {"name": service_name, "state": service_state, "source": "src"}
+ return services
+
+
+class OpenBSDScanService(BaseService):
+
+ def query_rcctl(self, cmd):
+ svcs = []
+ rc, stdout, stderr = self.module.run_command("%s ls %s" % (self.rcctl_path, cmd))
+ if 'needs root privileges' in stderr.lower():
+ self.module.warn('rcctl requires root privileges')
+ else:
+ for svc in stdout.split('\n'):
+ if svc == '':
+ continue
+ else:
+ svcs.append(svc)
+ return svcs
+
+ def gather_services(self):
+
+ services = {}
+ self.rcctl_path = self.module.get_bin_path("rcctl")
+ if self.rcctl_path:
+
+ for svc in self.query_rcctl('all'):
+ services[svc] = {'name': svc, 'source': 'rcctl'}
+
+ for svc in self.query_rcctl('on'):
+ services[svc].update({'status': 'enabled'})
+
+ for svc in self.query_rcctl('started'):
+ services[svc].update({'state': 'running'})
+
+            # Based on the list of services that are enabled, determine which are disabled
+            for svc in services:
+                if services[svc].get('status') is None:
+                    services[svc]['status'] = 'disabled'
+
+            # and do the same for those that aren't running
+            for svc in services:
+                if services[svc].get('state') is None:
+                    services[svc]['state'] = 'stopped'
+
+ # Override the state for services which are marked as 'failed'
+ for svc in self.query_rcctl('failed'):
+ services[svc].update({'state': 'failed'})
+
+ return services
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)
+ locale = get_best_parsable_locale(module)
+ module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale)
+ service_modules = (ServiceScanService, SystemctlScanService, AIXScanService, OpenBSDScanService)
+ all_services = {}
+ for svc_module in service_modules:
+ svcmod = svc_module(module)
+ svc = svcmod.gather_services()
+ if svc:
+ all_services.update(svc)
+ if len(all_services) == 0:
+ results = dict(skipped=True, msg="Failed to find any services. This can be due to privileges or some other configuration issue.")
+ else:
+ results = dict(ansible_facts=dict(services=all_services))
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/set_fact.py b/lib/ansible/modules/set_fact.py
new file mode 100644
index 0000000..5cb1f7d
--- /dev/null
+++ b/lib/ansible/modules/set_fact.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: set_fact
+short_description: Set host variable(s) and fact(s).
+version_added: "1.2"
+description:
+ - This action allows setting variables associated to the current host.
+ - These variables will be available to subsequent plays during an ansible-playbook run via the host they were set on.
+ - Set C(cacheable) to C(true) to save variables across executions using a fact cache.
+    Variables will keep the set_fact precedence for the current run, but will use 'cached fact' precedence for subsequent ones.
+ - Per the standard Ansible variable precedence rules, other types of variables have a higher priority, so this value may be overridden.
+options:
+ key_value:
+ description:
+ - "The C(set_fact) module takes C(key=value) pairs or C(key: value) (YAML notation) as variables to set in the playbook scope.
+ The 'key' is the resulting variable name and the value is, of course, the value of said variable."
+ - You can create multiple variables at once, by supplying multiple pairs, but do NOT mix notations.
+ required: true
+ cacheable:
+ description:
+ - This boolean converts the variable into an actual 'fact' which will also be added to the fact cache.
+ It does not enable fact caching across runs, it just means it will work with it if already enabled.
+      - Normally this module creates 'host level variables', which have much higher precedence; this option changes the nature and precedence
+ (by 7 steps) of the variable created.
+ U(https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable)
+ - "This actually creates 2 copies of the variable, a normal 'set_fact' host variable with high precedence and
+ a lower 'ansible_fact' one that is available for persistence via the facts cache plugin.
+ This creates a possibly confusing interaction with C(meta: clear_facts) as it will remove the 'ansible_fact' but not the host variable."
+ type: bool
+ default: no
+ version_added: "2.4"
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+attributes:
+ action:
+    details: While the action plugin does do some of the work, it relies on the core engine to actually create the variables; that part cannot be overridden
+ support: partial
+ bypass_host_loop:
+ support: none
+ bypass_task_loop:
+ support: none
+ check_mode:
+ support: full
+ core:
+ details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
+ support: partial
+ delegation:
+ details:
+ - while variable assignment can be delegated to a different host the execution context is always the current inventory_hostname
+ - connection variables, if set at all, would reflect the host it would target, even if we are not connecting at all in this case
+ support: partial
+ diff_mode:
+ support: none
+notes:
+ - Because of the nature of tasks, set_fact will produce 'static' values for a variable.
+ Unlike normal 'lazy' variables, the value gets evaluated and templated on assignment.
+  - Some boolean values (yes, no, true, false) will always be converted to boolean type,
+    unless C(DEFAULT_JINJA2_NATIVE) is enabled. This is done so that C(var=value) can create booleans
+    (otherwise it could only create strings), but it also prevents using those values to create YAML strings.
+    Using the setting will restrict k=v to strings, but will allow you to specify string or boolean in YAML.
+ - "To create lists/arrays or dictionary/hashes use YAML notation C(var: [val1, val2])."
+ - Since 'cacheable' is now a module param, 'cacheable' is no longer a valid fact name.
+seealso:
+- module: ansible.builtin.include_vars
+- ref: ansible_variable_precedence
+ description: More information related to variable precedence and which type of variable wins over others.
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Setting host facts using key=value pairs, this format can only create strings or booleans
+ ansible.builtin.set_fact: one_fact="something" other_fact="{{ local_var }}"
+
+- name: Setting host facts using complex arguments
+ ansible.builtin.set_fact:
+ one_fact: something
+ other_fact: "{{ local_var * 2 }}"
+ another_fact: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
+
+- name: Setting facts so that they will be persisted in the fact cache
+ ansible.builtin.set_fact:
+ one_fact: something
+ other_fact: "{{ local_var * 2 }}"
+ cacheable: yes
+
+- name: Creating list and dictionary variables
+ ansible.builtin.set_fact:
+ one_dict:
+ something: here
+ other: there
+ one_list:
+ - a
+ - b
+ - c
+# As of Ansible 1.8, Ansible will convert boolean strings ('true', 'false', 'yes', 'no')
+# to proper boolean values when using the key=value syntax, however it is still
+# recommended that booleans be set using the complex argument style:
+- name: Setting booleans using complex argument style
+ ansible.builtin.set_fact:
+ one_fact: yes
+ other_fact: no
+
+- name: Creating list and dictionary variables using 'shorthand' YAML
+ ansible.builtin.set_fact:
+ two_dict: {'something': here2, 'other': somewhere}
+ two_list: [1,2,3]
+'''
diff --git a/lib/ansible/modules/set_stats.py b/lib/ansible/modules/set_stats.py
new file mode 100644
index 0000000..16d7bfe
--- /dev/null
+++ b/lib/ansible/modules/set_stats.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible RedHat, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: set_stats
+short_description: Define and display stats for the current ansible run
+description:
+ - This module allows setting/accumulating stats on the current ansible run, either per host or for all hosts in the run.
+ - This module is also supported for Windows targets.
+author: Brian Coca (@bcoca)
+options:
+ data:
+ description:
+ - A dictionary of which each key represents a stat (or variable) you want to keep track of.
+ type: dict
+ required: true
+ per_host:
+ description:
+      - Whether the stats are per host or for all hosts in the run.
+ type: bool
+ default: no
+ aggregate:
+ description:
+ - Whether the provided value is aggregated to the existing stat C(true) or will replace it C(false).
+ type: bool
+ default: yes
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+ - action_core
+attributes:
+ action:
+ details: While the action plugin does do some of the work it relies on the core engine to actually create the variables, that part cannot be overridden
+ support: partial
+ bypass_host_loop:
+ support: none
+ bypass_task_loop:
+ support: none
+ core:
+ details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
+ support: partial
+ check_mode:
+ support: full
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+notes:
+ - In order for custom stats to be displayed, you must set C(show_custom_stats) in section C([defaults]) in C(ansible.cfg)
+ or by defining environment variable C(ANSIBLE_SHOW_CUSTOM_STATS) to C(true). See the C(default) callback plugin for details.
+version_added: "2.3"
+'''
+
+EXAMPLES = r'''
+- name: Aggregating packages_installed stat per host
+ ansible.builtin.set_stats:
+ data:
+ packages_installed: 31
+ per_host: yes
+
+- name: Aggregating random stats for all hosts using complex arguments
+ ansible.builtin.set_stats:
+ data:
+ one_stat: 11
+ other_stat: "{{ local_var * 2 }}"
+ another_stat: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
+ per_host: no
+
+- name: Setting stats (not aggregating)
+ ansible.builtin.set_stats:
+ data:
+ the_answer: 42
+ aggregate: no
+'''
diff --git a/lib/ansible/modules/setup.py b/lib/ansible/modules/setup.py
new file mode 100644
index 0000000..df2a67f
--- /dev/null
+++ b/lib/ansible/modules/setup.py
@@ -0,0 +1,230 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: setup
+version_added: historical
+short_description: Gathers facts about remote hosts
+options:
+ gather_subset:
+ version_added: "2.1"
+ description:
+ - "If supplied, restrict the additional facts collected to the given subset.
+ Possible values: C(all), C(all_ipv4_addresses), C(all_ipv6_addresses), C(apparmor), C(architecture),
+      C(caps), C(chroot), C(cmdline), C(date_time), C(default_ipv4), C(default_ipv6), C(devices),
+ C(distribution), C(distribution_major_version), C(distribution_release), C(distribution_version),
+ C(dns), C(effective_group_ids), C(effective_user_id), C(env), C(facter), C(fips), C(hardware),
+ C(interfaces), C(is_chroot), C(iscsi), C(kernel), C(local), C(lsb), C(machine), C(machine_id),
+ C(mounts), C(network), C(ohai), C(os_family), C(pkg_mgr), C(platform), C(processor), C(processor_cores),
+ C(processor_count), C(python), C(python_version), C(real_user_id), C(selinux), C(service_mgr),
+ C(ssh_host_key_dsa_public), C(ssh_host_key_ecdsa_public), C(ssh_host_key_ed25519_public),
+ C(ssh_host_key_rsa_public), C(ssh_host_pub_keys), C(ssh_pub_keys), C(system), C(system_capabilities),
+ C(system_capabilities_enforced), C(user), C(user_dir), C(user_gecos), C(user_gid), C(user_id),
+ C(user_shell), C(user_uid), C(virtual), C(virtualization_role), C(virtualization_type).
+ Can specify a list of values to specify a larger subset.
+ Values can also be used with an initial C(!) to specify that
+ that specific subset should not be collected. For instance:
+ C(!hardware,!network,!virtual,!ohai,!facter). If C(!all) is specified
+ then only the min subset is collected. To avoid collecting even the
+ min subset, specify C(!all,!min). To collect only specific facts,
+ use C(!all,!min), and specify the particular fact subsets.
+ Use the filter parameter if you do not want to display some collected
+ facts."
+ type: list
+ elements: str
+ default: "all"
+ gather_timeout:
+ version_added: "2.2"
+ description:
+ - Set the default timeout in seconds for individual fact gathering.
+ type: int
+ default: 10
+ filter:
+ version_added: "1.1"
+ description:
+ - If supplied, only return facts that match one of the shell-style
+ (fnmatch) patterns. An empty list effectively means 'no filter'.
+ As of Ansible 2.11, the type has changed from string to list
+ and the default has become an empty list. A simple string is
+ still accepted and works as a single pattern, so the behaviour
+ prior to Ansible 2.11 is preserved.
+ type: list
+ elements: str
+ default: []
+ fact_path:
+ version_added: "1.3"
+ description:
+ - Path used for local ansible facts (C(*.fact)) - files in this dir
+ will be run (if executable) and their results will be added to C(ansible_local) facts.
+ If a file is not executable, it is read instead.
+ File/results format can be JSON or INI format. The default C(fact_path) can be
+ specified in C(ansible.cfg) for when setup is automatically called as part of
+ C(gather_facts).
+ NOTE - For Windows clients, the results will be added to a variable named after the
+ local file (without extension suffix), rather than C(ansible_local).
+ - Since Ansible 2.1, Windows hosts can use C(fact_path). Make sure that this path
+ exists on the target host. Files in this path MUST be PowerShell scripts C(.ps1)
+ that output an object. This object will be formatted by Ansible as JSON, so the
+ script should output a raw hashtable, array, or other primitive object.
+ type: path
+ default: /etc/ansible/facts.d
+description:
+ - This module is automatically called by playbooks to gather useful
+ variables about remote hosts that can then be used in plays. It can also be
+ executed directly by C(/usr/bin/ansible) to check what variables are
+ available to a host. Ansible provides many I(facts) about the system
+ automatically.
+ - This module is also supported for Windows targets.
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.facts
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ facts:
+ support: full
+ platform:
+ platforms: posix, windows
+notes:
+ - More Ansible facts will be added with successive releases. If I(facter) or
+ I(ohai) is installed, variables from these programs will also be snapshotted
+ into the JSON file for use in templating. These variables are prefixed
+ with C(facter_) and C(ohai_), so it is easy to tell their source. All variables are
+ bubbled up to the caller. Using the Ansible facts and choosing not to
+ install I(facter) and I(ohai) means you can avoid Ruby dependencies on your
+ remote systems. (See also M(community.general.facter) and M(community.general.ohai).)
+ - The filter option filters only the first level subkey below ansible_facts.
+ - If the target host is Windows, you will not currently have the ability to use
+ C(filter) as this is provided by a simpler implementation of the module.
+ - This module should be run with elevated privileges on BSD systems to gather facts like ansible_product_version.
+ - For more information about delegated facts,
+ please check U(https://docs.ansible.com/ansible/latest/user_guide/playbooks_delegation.html#delegating-facts).
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+'''
+
+EXAMPLES = r"""
+# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts).
+# ansible all -m ansible.builtin.setup --tree /tmp/facts
+
+# Display only facts regarding memory found by ansible on all hosts and output them.
+# ansible all -m ansible.builtin.setup -a 'filter=ansible_*_mb'
+
+# Display only facts returned by facter.
+# ansible all -m ansible.builtin.setup -a 'filter=facter_*'
+
+# Collect only facts returned by facter.
+# ansible all -m ansible.builtin.setup -a 'gather_subset=!all,facter'
+
+- name: Collect only facts returned by facter
+ ansible.builtin.setup:
+ gather_subset:
+ - '!all'
+ - '!<any valid subset>'
+ - facter
+
+- name: Filter and return only selected facts
+ ansible.builtin.setup:
+ filter:
+ - 'ansible_distribution'
+ - 'ansible_machine_id'
+ - 'ansible_*_mb'
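+
+# The fact_path option documented above can point at a non-default facts.d
+# directory; a small sketch (the path here is illustrative):
+- name: Gather local custom facts from an alternate facts.d directory
+ ansible.builtin.setup:
+ fact_path: /opt/ansible/facts.d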
+
+# Display only facts about certain interfaces.
+# ansible all -m ansible.builtin.setup -a 'filter=ansible_eth[0-2]'
+
+# Restrict additional gathered facts to network and virtual (includes default minimum facts)
+# ansible all -m ansible.builtin.setup -a 'gather_subset=network,virtual'
+
+# Collect only network and virtual (excludes default minimum facts)
+# ansible all -m ansible.builtin.setup -a 'gather_subset=!all,network,virtual'
+
+# Do not call puppet facter or ohai even if present.
+# ansible all -m ansible.builtin.setup -a 'gather_subset=!facter,!ohai'
+
+# Only collect the default minimum amount of facts:
+# ansible all -m ansible.builtin.setup -a 'gather_subset=!all'
+
+# Collect no facts, even the default minimum subset of facts:
+# ansible all -m ansible.builtin.setup -a 'gather_subset=!all,!min'
+
+# Display facts from Windows hosts with custom facts stored in C:\custom_facts.
+# ansible windows -m ansible.builtin.setup -a "fact_path='c:\custom_facts'"
+
+# Gather facts for the machines in the dbservers group (a.k.a. delegating facts)
+- hosts: app_servers
+ tasks:
+ - name: Gather facts from db servers
+ ansible.builtin.setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups['dbservers'] }}"
+"""
+
+# import module snippets
+from ..module_utils.basic import AnsibleModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.facts import ansible_collector, default_collectors
+from ansible.module_utils.facts.collector import CollectorNotFoundError, CycleFoundInFactDeps, UnresolvedFactDep
+from ansible.module_utils.facts.namespace import PrefixFactNamespace
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ gather_subset=dict(default=["all"], required=False, type='list', elements='str'),
+ gather_timeout=dict(default=10, required=False, type='int'),
+ filter=dict(default=[], required=False, type='list', elements='str'),
+ fact_path=dict(default='/etc/ansible/facts.d', required=False, type='path'),
+ ),
+ supports_check_mode=True,
+ )
+
+ gather_subset = module.params['gather_subset']
+ gather_timeout = module.params['gather_timeout']
+ filter_spec = module.params['filter']
+
+ # TODO: this mimics existing behavior where gather_subset=["!all"] actually means
+ # to collect nothing except for the below list
+ # TODO: decide what '!all' means, I lean towards making it mean none, but likely needs
+ # some tweaking on how gather_subset operations are performed
+ minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
+ 'distribution', 'dns', 'env', 'fips', 'local',
+ 'lsb', 'pkg_mgr', 'platform', 'python', 'selinux',
+ 'service_mgr', 'ssh_pub_keys', 'user'])
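+
+ # A sketch of the semantics documented above (illustrative, not exhaustive):
+ # gather_subset=['!all'] -> collect only the minimal subset
+ # gather_subset=['!all', '!min'] -> collect nothing
+ # gather_subset=['!all', '!min', 'facter'] -> collect only facter facts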
+
+ all_collector_classes = default_collectors.collectors
+
+ # rename namespace_name to root_key?
+ namespace = PrefixFactNamespace(namespace_name='ansible',
+ prefix='ansible_')
+
+ try:
+ fact_collector = ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
+ namespace=namespace,
+ filter_spec=filter_spec,
+ gather_subset=gather_subset,
+ gather_timeout=gather_timeout,
+ minimal_gather_subset=minimal_gather_subset)
+ except (TypeError, CollectorNotFoundError, CycleFoundInFactDeps, UnresolvedFactDep) as e:
+ # a bad subset was given, a collector was not found, a dependency cycle exists, or declared fact deps could not be resolved
+ module.fail_json(msg=to_text(e))
+
+ facts_dict = fact_collector.collect(module=module)
+
+ module.exit_json(ansible_facts=facts_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/shell.py b/lib/ansible/modules/shell.py
new file mode 100644
index 0000000..52fda1b
--- /dev/null
+++ b/lib/ansible/modules/shell.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# There is no actual shell module source; when you use 'shell' in Ansible,
+# it runs the 'command' module with special arguments, and it behaves differently.
+# See the command source and the comment "#USE_SHELL".
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: shell
+short_description: Execute shell commands on targets
+description:
+ - The C(shell) module takes the command name followed by a list of space-delimited arguments.
+ - Either a free form command or C(cmd) parameter is required, see the examples.
+ - It is almost exactly like the M(ansible.builtin.command) module but runs
+ the command through a shell (C(/bin/sh)) on the remote node.
+ - For Windows targets, use the M(ansible.windows.win_shell) module instead.
+version_added: "0.2"
+options:
+ free_form:
+ description:
+ - The shell module takes a free form command to run, as a string.
+ - There is no actual parameter named 'free form'.
+ - See the examples on how to use this module.
+ type: str
+ cmd:
+ type: str
+ description:
+ - The command to run followed by optional arguments.
+ creates:
+ description:
+ - A filename; when it already exists, this step will B(not) be run.
+ type: path
+ removes:
+ description:
+ - A filename; when it does not exist, this step will B(not) be run.
+ type: path
+ version_added: "0.8"
+ chdir:
+ description:
+ - Change into this directory before running the command.
+ type: path
+ version_added: "0.6"
+ executable:
+ description:
+ - Change the shell used to execute the command.
+ - This expects an absolute path to the executable.
+ type: path
+ version_added: "0.9"
+ stdin:
+ description:
+ - Set the stdin of the command directly to the specified value.
+ type: str
+ version_added: "2.4"
+ stdin_add_newline:
+ description:
+ - Whether to append a newline to stdin data.
+ type: bool
+ default: yes
+ version_added: "2.8"
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.raw
+attributes:
+ check_mode:
+ details: While the command itself is arbitrary and cannot be subject to check mode semantics, this action adds C(creates)/C(removes) options as a workaround
+ support: partial
+ diff_mode:
+ support: none
+ platform:
+ support: full
+ platforms: posix
+ raw:
+ support: full
+notes:
+ - If you want to execute a command securely and predictably, it may be
+ better to use the M(ansible.builtin.command) module instead. Best practice
+ when writing playbooks is to use M(ansible.builtin.command)
+ unless the M(ansible.builtin.shell) module is explicitly required. When running ad hoc
+ commands, use your best judgement.
+ - To sanitize any variables passed to the shell module, you should use
+ C({{ var | quote }}) instead of just C({{ var }}) to make sure they
+ do not include evil things like semicolons.
+ - An alternative to using inline shell scripts with this module is to use
+ the M(ansible.builtin.script) module possibly together with the M(ansible.builtin.template) module.
+ - For rebooting systems, use the M(ansible.builtin.reboot) or M(ansible.windows.win_reboot) module.
+seealso:
+- module: ansible.builtin.command
+- module: ansible.builtin.raw
+- module: ansible.builtin.script
+- module: ansible.windows.win_shell
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Execute the command in remote shell; stdout goes to the specified file on the remote
+ ansible.builtin.shell: somescript.sh >> somelog.txt
+
+- name: Change the working directory to somedir/ before executing the command
+ ansible.builtin.shell: somescript.sh >> somelog.txt
+ args:
+ chdir: somedir/
+
+# You can also use the 'args' form to provide the options.
+- name: This command will change the working directory to somedir/ and will only run when somedir/somelog.txt doesn't exist
+ ansible.builtin.shell: somescript.sh >> somelog.txt
+ args:
+ chdir: somedir/
+ creates: somelog.txt
+
+# You can also use the 'cmd' parameter instead of free form format.
+- name: This command will change the working directory to somedir/
+ ansible.builtin.shell:
+ cmd: ls -l | grep log
+ chdir: somedir/
+
+- name: Run a command that uses non-posix shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
+ ansible.builtin.shell: cat < /tmp/*txt
+ args:
+ executable: /bin/bash
+
+- name: Run a command using a templated variable (always use quote filter to avoid injection)
+ ansible.builtin.shell: cat {{ myfile|quote }}
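+
+# The stdin options documented above can feed data to a command without a
+# temporary file; a small sketch (the input string is illustrative):
+- name: Pass data to a command on standard input
+ ansible.builtin.shell: cat
+ args:
+ stdin: hello from ansible
+ stdin_add_newline: no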
+
+# You can use shell to run other executables to perform actions inline
+- name: Run expect to wait for a successful PXE boot via out-of-band CIMC
+ ansible.builtin.shell: |
+ set timeout 300
+ spawn ssh admin@{{ cimc_host }}
+
+ expect "password:"
+ send "{{ cimc_password }}\n"
+
+ expect "\n{{ cimc_name }}"
+ send "connect host\n"
+
+ expect "pxeboot.n12"
+ send "\n"
+
+ exit 0
+ args:
+ executable: /usr/bin/expect
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+msg:
+ description: changed
+ returned: always
+ type: bool
+ sample: True
+start:
+ description: The command execution start time.
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.429568'
+end:
+ description: The command execution end time.
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.755339'
+delta:
+ description: The command execution delta time.
+ returned: always
+ type: str
+ sample: '0:00:00.325771'
+stdout:
+ description: The command standard output.
+ returned: always
+ type: str
+ sample: 'Clustering node rabbit@slave1 with rabbit@master …'
+stderr:
+ description: The command standard error.
+ returned: always
+ type: str
+ sample: 'ls: cannot access foo: No such file or directory'
+cmd:
+ description: The command executed by the task.
+ returned: always
+ type: str
+ sample: 'rabbitmqctl join_cluster rabbit@master'
+rc:
+ description: The command return code (0 means success).
+ returned: always
+ type: int
+ sample: 0
+stdout_lines:
+ description: The command standard output split in lines.
+ returned: always
+ type: list
+ sample: [u'Clustering node rabbit@slave1 with rabbit@master …']
+stderr_lines:
+ description: The command standard error split in lines.
+ returned: always
+ type: list
+ sample: [u'ls: cannot access foo: No such file or directory', u'ls …']
+'''
diff --git a/lib/ansible/modules/slurp.py b/lib/ansible/modules/slurp.py
new file mode 100644
index 0000000..55abfeb
--- /dev/null
+++ b/lib/ansible/modules/slurp.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: slurp
+version_added: historical
+short_description: Slurps a file from remote nodes
+description:
+ - This module works like M(ansible.builtin.fetch). It is used for fetching a
+ base64-encoded blob containing the data in a remote file.
+ - This module is also supported for Windows targets.
+options:
+ src:
+ description:
+ - The file on the remote system to fetch. This I(must) be a file, not a directory.
+ type: path
+ required: true
+ aliases: [ path ]
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix, windows
+notes:
+ - This module returns an 'in memory' base64-encoded version of the file; take
+ into account that this will require at least twice the RAM of the original file size.
+seealso:
+- module: ansible.builtin.fetch
+author:
+ - Ansible Core Team
+ - Michael DeHaan (@mpdehaan)
+'''
+
+EXAMPLES = r'''
+- name: Find out what the remote machine's mounts are
+ ansible.builtin.slurp:
+ src: /proc/mounts
+ register: mounts
+
+- name: Print returned information
+ ansible.builtin.debug:
+ msg: "{{ mounts['content'] | b64decode }}"
+
+# From the commandline, find the pid of the remote machine's sshd
+# $ ansible host -m ansible.builtin.slurp -a 'src=/var/run/sshd.pid'
+# host | SUCCESS => {
+# "changed": false,
+# "content": "MjE3OQo=",
+# "encoding": "base64",
+# "source": "/var/run/sshd.pid"
+# }
+# $ echo MjE3OQo= | base64 -d
+# 2179
+'''
+
+RETURN = r'''
+content:
+ description: Encoded file content
+ returned: success
+ type: str
+ sample: "MjE3OQo="
+encoding:
+ description: Type of encoding used for file
+ returned: success
+ type: str
+ sample: "base64"
+source:
+ description: Actual path of file slurped
+ returned: success
+ type: str
+ sample: "/var/run/sshd.pid"
+'''
+
+import base64
+import errno
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path', required=True, aliases=['path']),
+ ),
+ supports_check_mode=True,
+ )
+ source = module.params['src']
+
+ try:
+ with open(source, 'rb') as source_fh:
+ source_content = source_fh.read()
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ msg = "file not found: %s" % source
+ elif e.errno == errno.EACCES:
+ msg = "file is not readable: %s" % source
+ elif e.errno == errno.EISDIR:
+ msg = "source is a directory and must be a file: %s" % source
+ else:
+ msg = "unable to slurp file: %s" % to_native(e, errors='surrogate_then_replace')
+
+ module.fail_json(msg)
+
+ data = base64.b64encode(source_content)
+
+ module.exit_json(content=data, source=source, encoding='base64')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/stat.py b/lib/ansible/modules/stat.py
new file mode 100644
index 0000000..45ca78b
--- /dev/null
+++ b/lib/ansible/modules/stat.py
@@ -0,0 +1,560 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: stat
+version_added: "1.3"
+short_description: Retrieve file or file system status
+description:
+ - Retrieves facts for a file similar to the Linux/Unix 'stat' command.
+ - For Windows targets, use the M(ansible.windows.win_stat) module instead.
+options:
+ path:
+ description:
+ - The full path of the file/object to get the facts of.
+ type: path
+ required: true
+ aliases: [ dest, name ]
+ follow:
+ description:
+ - Whether to follow symlinks.
+ type: bool
+ default: no
+ get_checksum:
+ description:
+ - Whether to return a checksum of the file.
+ type: bool
+ default: yes
+ version_added: "1.8"
+ checksum_algorithm:
+ description:
+ - Algorithm to determine the checksum of the file.
+ - Will throw an error if the host is unable to use the specified algorithm.
+ - The remote host has to support the hashing method specified; C(md5)
+ can be unavailable if the host is FIPS-140 compliant.
+ type: str
+ choices: [ md5, sha1, sha224, sha256, sha384, sha512 ]
+ default: sha1
+ aliases: [ checksum, checksum_algo ]
+ version_added: "2.0"
+ get_mime:
+ description:
+ - Use file magic and return data about the nature of the file. This uses
+ the 'file' utility found on most Linux/Unix systems.
+ - This will add both C(mime_type) and C(charset) fields to the return, if possible.
+ - In Ansible 2.3 this option changed from I(mime) to I(get_mime) and the default changed to C(true).
+ type: bool
+ default: yes
+ aliases: [ mime, mime_type, mime-type ]
+ version_added: "2.1"
+ get_attributes:
+ description:
+ - Get file attributes using the lsattr tool, if present.
+ type: bool
+ default: yes
+ aliases: [ attr, attributes ]
+ version_added: "2.3"
+extends_documentation_fragment:
+ - action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+- module: ansible.builtin.file
+- module: ansible.windows.win_stat
+author: Bruce Pennypacker (@bpennypacker)
+'''
+
+EXAMPLES = r'''
+# Obtain the stats of /etc/foo.conf, and check that the file still belongs
+# to 'root'. Fail otherwise.
+- name: Get stats of a file
+ ansible.builtin.stat:
+ path: /etc/foo.conf
+ register: st
+- name: Fail if the file does not belong to 'root'
+ ansible.builtin.fail:
+ msg: "Whoops! file ownership has changed"
+ when: st.stat.pw_name != 'root'
+
+# Determine if a path exists and is a symlink. Note that if the path does
+# not exist and we test sym.stat.islnk, it will fail with an error. We
+# must therefore first test whether it is defined.
+# Run this to understand the structure; the skipped results do not pass the
+# check performed by 'when'.
+- name: Get stats of the FS object
+ ansible.builtin.stat:
+ path: /path/to/something
+ register: sym
+
+- name: Print a debug message
+ ansible.builtin.debug:
+ msg: "islnk isn't defined (path doesn't exist)"
+ when: sym.stat.islnk is not defined
+
+- name: Print a debug message
+ ansible.builtin.debug:
+ msg: "islnk is defined (path must exist)"
+ when: sym.stat.islnk is defined
+
+- name: Print a debug message
+ ansible.builtin.debug:
+ msg: "Path exists and is a symlink"
+ when: sym.stat.islnk is defined and sym.stat.islnk
+
+- name: Print a debug message
+ ansible.builtin.debug:
+ msg: "Path exists and isn't a symlink"
+ when: sym.stat.islnk is defined and not sym.stat.islnk
+
+
+# Determine if a path exists and is a directory. Note that we need to test
+# both that p.stat.isdir actually exists, and also that it's set to true.
+- name: Get stats of the FS object
+ ansible.builtin.stat:
+ path: /path/to/something
+ register: p
+- name: Print a debug message
+ ansible.builtin.debug:
+ msg: "Path exists and is a directory"
+ when: p.stat.isdir is defined and p.stat.isdir
+
+- name: Do not calculate the checksum
+ ansible.builtin.stat:
+ path: /path/to/myhugefile
+ get_checksum: no
+
+- name: Use sha256 to calculate the checksum
+ ansible.builtin.stat:
+ path: /path/to/something
+ checksum_algorithm: sha256
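+
+# A registered checksum can gate later tasks; a minimal sketch (the register
+# name and expected value are illustrative):
+- name: Get the sha256 checksum of a file
+ ansible.builtin.stat:
+ path: /path/to/something
+ checksum_algorithm: sha256
+ register: st_sum
+
+- name: Fail when the checksum does not match the expected value
+ ansible.builtin.fail:
+ msg: "Checksum mismatch for /path/to/something"
+ when: st_sum.stat.checksum is defined and st_sum.stat.checksum != 'expected_sha256_value'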
+'''
+
+RETURN = r'''
+stat:
+ description: Dictionary containing all the stat data; some platforms might add additional fields.
+ returned: success
+ type: complex
+ contains:
+ exists:
+ description: Whether the destination path actually exists or not
+ returned: success
+ type: bool
+ sample: True
+ path:
+ description: The full path of the file/object to get the facts of
+ returned: success and if path exists
+ type: str
+ sample: '/path/to/file'
+ mode:
+ description: Unix permissions of the file in octal representation as a string
+ returned: success, path exists and user can read stats
+ type: str
+ sample: 1755
+ isdir:
+ description: Tells you if the path is a directory
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ ischr:
+ description: Tells you if the path is a character device
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ isblk:
+ description: Tells you if the path is a block device
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ isreg:
+ description: Tells you if the path is a regular file
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ isfifo:
+ description: Tells you if the path is a named pipe
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ islnk:
+ description: Tells you if the path is a symbolic link
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ issock:
+ description: Tells you if the path is a unix domain socket
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ uid:
+ description: Numeric id representing the file owner
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 1003
+ gid:
+ description: Numeric id representing the group of the owner
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 1003
+ size:
+ description: Size in bytes for a plain file, amount of data for some special files
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 203
+ inode:
+ description: Inode number of the path
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 12758
+ dev:
+ description: Device the inode resides on
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 33
+ nlink:
+ description: Number of links to the inode (hard links)
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 1
+ atime:
+ description: Time of last access
+ returned: success, path exists and user can read stats
+ type: float
+ sample: 1424348972.575
+ mtime:
+ description: Time of last modification
+ returned: success, path exists and user can read stats
+ type: float
+ sample: 1424348972.575
+ ctime:
+ description: Time of last metadata update or creation (depends on OS)
+ returned: success, path exists and user can read stats
+ type: float
+ sample: 1424348972.575
+ wusr:
+ description: Tells you if the owner has write permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ rusr:
+ description: Tells you if the owner has read permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ xusr:
+ description: Tells you if the owner has execute permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ wgrp:
+ description: Tells you if the owner's group has write permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ rgrp:
+ description: Tells you if the owner's group has read permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ xgrp:
+ description: Tells you if the owner's group has execute permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ woth:
+ description: Tells you if others have write permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ roth:
+ description: Tells you if others have read permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ xoth:
+ description: Tells you if others have execute permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ isuid:
+ description: Tells you if the file has the setuid bit set
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ isgid:
+ description: Tells you if the file has the setgid bit set
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ lnk_source:
+ description: Target of the symlink normalized for the remote filesystem
+ returned: success, path exists and user can read stats and the path is a symbolic link
+ type: str
+ sample: /home/foobar/21102015-1445431274-908472971
+ lnk_target:
+ description: Target of the symlink. Note that relative paths remain relative
+ returned: success, path exists and user can read stats and the path is a symbolic link
+ type: str
+ sample: ../foobar/21102015-1445431274-908472971
+ version_added: 2.4
+ md5:
+ description: md5 hash of the file; deprecated in favor of the C(checksum)
+ return value and slated for removal in a future release
+ returned: success, path exists and user can read stats and path
+ supports hashing and md5 is supported
+ type: str
+ sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
+ checksum:
+ description: hash of the file
+ returned: success, path exists, user can read stats, path supports
+ hashing and supplied checksum algorithm is available
+ type: str
+ sample: 50ba294cdf28c0d5bcde25708df53346825a429f
+ pw_name:
+ description: User name of owner
+ returned: success, path exists, user can read stats, owner name can be looked up and installed python supports it
+ type: str
+ sample: httpd
+ gr_name:
+ description: Group name of owner
+ returned: success, path exists, user can read stats, owner group can be looked up and installed python supports it
+ type: str
+ sample: www-data
+ mimetype:
+ description: file magic data or mime-type
+ returned: success, path exists and user can read stats and
+ installed python supports it and the I(get_mime) option was true; will
+ return C(unknown) on error.
+ type: str
+ sample: application/pdf; charset=binary
+ charset:
+ description: file character set or encoding
+ returned: success, path exists and user can read stats and
+ installed python supports it and the I(get_mime) option was true; will
+ return C(unknown) on error.
+ type: str
+ sample: us-ascii
+ readable:
+ description: Tells you if the invoking user has the right to read the path
+ returned: success, path exists and user can read the path
+ type: bool
+ sample: False
+ version_added: 2.2
+ writeable:
+ description: Tells you if the invoking user has the right to write the path
+ returned: success, path exists and user can write the path
+ type: bool
+ sample: False
+ version_added: 2.2
+ executable:
+ description: Tells you if the invoking user has execute permission on the path
+ returned: success, path exists and user can execute the path
+ type: bool
+ sample: False
+ version_added: 2.2
+ attributes:
+ description: list of file attributes
+ returned: success, path exists and user can execute the path
+ type: list
+ sample: [ immutable, extent ]
+ version_added: 2.3
+ version:
+ description: The version/generation attribute of a file according to the filesystem
+ returned: success, path exists, user can execute the path, lsattr is available and the filesystem supports it
+ type: str
+ sample: "381700746"
+ version_added: 2.3
+'''
+
+import errno
+import grp
+import os
+import pwd
+import stat
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def format_output(module, path, st):
+ mode = st.st_mode
+
+ # assemble the base stat results returned to Ansible
+ output = dict(
+ exists=True,
+ path=path,
+ mode="%04o" % stat.S_IMODE(mode),
+ isdir=stat.S_ISDIR(mode),
+ ischr=stat.S_ISCHR(mode),
+ isblk=stat.S_ISBLK(mode),
+ isreg=stat.S_ISREG(mode),
+ isfifo=stat.S_ISFIFO(mode),
+ islnk=stat.S_ISLNK(mode),
+ issock=stat.S_ISSOCK(mode),
+ uid=st.st_uid,
+ gid=st.st_gid,
+ size=st.st_size,
+ inode=st.st_ino,
+ dev=st.st_dev,
+ nlink=st.st_nlink,
+ atime=st.st_atime,
+ mtime=st.st_mtime,
+ ctime=st.st_ctime,
+ wusr=bool(mode & stat.S_IWUSR),
+ rusr=bool(mode & stat.S_IRUSR),
+ xusr=bool(mode & stat.S_IXUSR),
+ wgrp=bool(mode & stat.S_IWGRP),
+ rgrp=bool(mode & stat.S_IRGRP),
+ xgrp=bool(mode & stat.S_IXGRP),
+ woth=bool(mode & stat.S_IWOTH),
+ roth=bool(mode & stat.S_IROTH),
+ xoth=bool(mode & stat.S_IXOTH),
+ isuid=bool(mode & stat.S_ISUID),
+ isgid=bool(mode & stat.S_ISGID),
+ )
+
+ # Platform dependent flags:
+ for other in [
+ # Some Linux
+ ('st_blocks', 'blocks'),
+ ('st_blksize', 'block_size'),
+ ('st_rdev', 'device_type'),
+ ('st_flags', 'flags'),
+ # Some Berkeley based
+ ('st_gen', 'generation'),
+ ('st_birthtime', 'birthtime'),
+ # RISCOS
+ ('st_ftype', 'file_type'),
+ ('st_attrs', 'attrs'),
+ ('st_obtype', 'object_type'),
+ # macOS
+ ('st_rsize', 'real_size'),
+ ('st_creator', 'creator'),
+ ('st_type', 'file_type'),
+ ]:
+ if hasattr(st, other[0]):
+ output[other[1]] = getattr(st, other[0])
+
+ return output
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'name']),
+ follow=dict(type='bool', default=False),
+ get_md5=dict(type='bool', default=False),
+ get_checksum=dict(type='bool', default=True),
+ get_mime=dict(type='bool', default=True, aliases=['mime', 'mime_type', 'mime-type']),
+ get_attributes=dict(type='bool', default=True, aliases=['attr', 'attributes']),
+ checksum_algorithm=dict(type='str', default='sha1',
+ choices=['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
+ aliases=['checksum', 'checksum_algo']),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params.get('path')
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ follow = module.params.get('follow')
+ get_mime = module.params.get('get_mime')
+ get_attr = module.params.get('get_attributes')
+ get_checksum = module.params.get('get_checksum')
+ checksum_algorithm = module.params.get('checksum_algorithm')
+
+ # NOTE: undocumented option since 2.9 to be removed at a later date if possible (3.0+)
+ # no real reason for keeping other than fear we may break older content.
+ get_md5 = module.params.get('get_md5')
+
+ # main stat data
+ try:
+ if follow:
+ st = os.stat(b_path)
+ else:
+ st = os.lstat(b_path)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ output = {'exists': False}
+ module.exit_json(changed=False, stat=output)
+
+ module.fail_json(msg=e.strerror)
+
+ # process base results
+ output = format_output(module, path, st)
+
+ # resolved permissions
+ for perm in [('readable', os.R_OK), ('writeable', os.W_OK), ('executable', os.X_OK)]:
+ output[perm[0]] = os.access(b_path, perm[1])
+
+ # symlink info
+ if output.get('islnk'):
+ output['lnk_source'] = os.path.realpath(b_path)
+ output['lnk_target'] = os.readlink(b_path)
+
+ try: # user data
+ pw = pwd.getpwuid(st.st_uid)
+ output['pw_name'] = pw.pw_name
+ except (TypeError, KeyError):
+ pass
+
+ try: # group data
+ grp_info = grp.getgrgid(st.st_gid)
+ output['gr_name'] = grp_info.gr_name
+ except (KeyError, ValueError, OverflowError):
+ pass
+
+ # checksums
+ if output.get('isreg') and output.get('readable'):
+
+ # NOTE: see above about get_md5
+ if get_md5:
+ # Will fail on FIPS-140 compliant systems
+ try:
+ output['md5'] = module.md5(b_path)
+ except ValueError:
+ output['md5'] = None
+
+ if get_checksum:
+ output['checksum'] = module.digest_from_file(b_path, checksum_algorithm)
+
+ # try to get mime data if requested
+ if get_mime:
+ output['mimetype'] = output['charset'] = 'unknown'
+ mimecmd = module.get_bin_path('file')
+ if mimecmd:
+ mimecmd = [mimecmd, '--mime-type', '--mime-encoding', b_path]
+ try:
+ rc, out, err = module.run_command(mimecmd)
+ if rc == 0:
+ mimetype, charset = out.rsplit(':', 1)[1].split(';')
+ output['mimetype'] = mimetype.strip()
+ output['charset'] = charset.split('=')[1].strip()
+ except Exception:
+ pass
+
+ # try to get attr data
+ if get_attr:
+ output['version'] = None
+ output['attributes'] = []
+ output['attr_flags'] = ''
+ out = module.get_file_attributes(b_path)
+ for x in ('version', 'attributes', 'attr_flags'):
+ if x in out:
+ output[x] = out[x]
+
+ module.exit_json(changed=False, stat=output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/subversion.py b/lib/ansible/modules/subversion.py
new file mode 100644
index 0000000..68aacfd
--- /dev/null
+++ b/lib/ansible/modules/subversion.py
@@ -0,0 +1,393 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: subversion
+short_description: Deploys a subversion repository
+description:
+ - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision; otherwise, perform a checkout.
+version_added: "0.7"
+author:
+- Dane Summers (@dsummersl) <njharman@gmail.com>
+options:
+ repo:
+ description:
+ - The subversion URL to the repository.
+ type: str
+ required: true
+ aliases: [ name, repository ]
+ dest:
+ description:
+ - Absolute path where the repository should be deployed.
+ - The destination directory must be specified unless I(checkout=no), I(update=no), and I(export=no).
+ type: path
+ revision:
+ description:
+ - Specific revision to checkout.
+ type: str
+ default: HEAD
+ aliases: [ rev, version ]
+ force:
+ description:
+ - If C(true), modified files will be discarded. If C(false), the module will fail if it encounters modified files.
+ Prior to 1.9, the default was C(true).
+ type: bool
+ default: "no"
+ in_place:
+ description:
+ - If the directory exists, then the working copy will be checked out over the top using
+ C(svn checkout --force); if force is specified, then existing files with different content are reverted.
+ type: bool
+ default: "no"
+ version_added: "2.6"
+ username:
+ description:
+ - C(--username) parameter passed to svn.
+ type: str
+ password:
+ description:
+ - C(--password) parameter passed to svn when svn is less than version 1.10.0. This is not secure and
+ the password will be leaked to argv.
+ - C(--password-from-stdin) parameter used when svn is greater than or equal to version 1.10.0.
+ type: str
+ executable:
+ description:
+ - Path to svn executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+ type: path
+ version_added: "1.4"
+ checkout:
+ description:
+ - If C(false), do not check out the repository if it does not exist locally.
+ type: bool
+ default: "yes"
+ version_added: "2.3"
+ update:
+ description:
+ - If C(false), do not retrieve new revisions from the origin repository.
+ type: bool
+ default: "yes"
+ version_added: "2.3"
+ export:
+ description:
+ - If C(true), do export instead of checkout/update.
+ type: bool
+ default: "no"
+ version_added: "1.6"
+ switch:
+ description:
+ - If C(false), do not call svn switch before update.
+ default: "yes"
+ version_added: "2.0"
+ type: bool
+ validate_certs:
+ description:
+ - If C(false), passes the C(--trust-server-cert) flag to svn.
+ - If C(true), does not pass the flag.
+ default: "no"
+ version_added: "2.11"
+ type: bool
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - This module does not handle externals.
+
+requirements:
+ - subversion (the command line tool with C(svn) entrypoint)
+'''
+
+EXAMPLES = '''
+- name: Checkout subversion repository to specified folder
+ ansible.builtin.subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/checkout
+
+- name: Export subversion directory to folder
+ ansible.builtin.subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/export
+ export: yes
+
+- name: Get information about the repository, whether or not it has already been checked out locally
+ ansible.builtin.subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/checkout
+ checkout: no
+ update: no
+'''
+
+RETURN = r'''#'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.compat.version import LooseVersion
+
+
+class Subversion(object):
+
+ # Example text matched by the regexp:
+ # Révision : 1889134
+ # 版本: 1889134
+ # Revision: 1889134
+ REVISION_RE = r'^\w+\s?:\s+\d+$'
+
+ def __init__(self, module, dest, repo, revision, username, password, svn_path, validate_certs):
+ self.module = module
+ self.dest = dest
+ self.repo = repo
+ self.revision = revision
+ self.username = username
+ self.password = password
+ self.svn_path = svn_path
+ self.validate_certs = validate_certs
+
+ def has_option_password_from_stdin(self):
+ rc, version, err = self.module.run_command([self.svn_path, '--version', '--quiet'], check_rc=True)
+ return LooseVersion(version) >= LooseVersion('1.10.0')
+
+ def _exec(self, args, check_rc=True):
+ '''Execute a subversion command, and return output. If check_rc is False, returns the return code instead of the output.'''
+ bits = [
+ self.svn_path,
+ '--non-interactive',
+ '--no-auth-cache',
+ ]
+ if not self.validate_certs:
+ bits.append('--trust-server-cert')
+ stdin_data = None
+ if self.username:
+ bits.extend(["--username", self.username])
+ if self.password:
+ if self.has_option_password_from_stdin():
+ bits.append("--password-from-stdin")
+ stdin_data = self.password
+ else:
+ self.module.warn("The authentication provided will be used on the svn command line and is not secure. "
+ "To securely pass credentials, upgrade svn to version 1.10.0 or greater.")
+ bits.extend(["--password", self.password])
+ bits.extend(args)
+ rc, out, err = self.module.run_command(bits, check_rc, data=stdin_data)
+
+ if check_rc:
+ return out.splitlines()
+ else:
+ return rc
+
+ def is_svn_repo(self):
+ '''Checks if path is an SVN repo.'''
+ rc = self._exec(["info", self.dest], check_rc=False)
+ return rc == 0
+
+ def checkout(self, force=False):
+ '''Creates new svn working directory if it does not already exist.'''
+ cmd = ["checkout"]
+ if force:
+ cmd.append("--force")
+ cmd.extend(["-r", self.revision, self.repo, self.dest])
+ self._exec(cmd)
+
+ def export(self, force=False):
+ '''Export svn repo to directory'''
+ cmd = ["export"]
+ if force:
+ cmd.append("--force")
+ cmd.extend(["-r", self.revision, self.repo, self.dest])
+
+ self._exec(cmd)
+
+ def switch(self):
+ '''Change working directory's repo.'''
+ # switch to ensure we are pointing at correct repo.
+ # it also updates!
+ output = self._exec(["switch", "--revision", self.revision, self.repo, self.dest])
+ for line in output:
+ if re.search(r'^[ABDUCGE]\s', line):
+ return True
+ return False
+
+ def update(self):
+ '''Update existing svn working directory.'''
+ output = self._exec(["update", "-r", self.revision, self.dest])
+
+ for line in output:
+ if re.search(r'^[ABDUCGE]\s', line):
+ return True
+ return False
+
+ def revert(self):
+ '''Revert svn working directory.'''
+ output = self._exec(["revert", "-R", self.dest])
+ for line in output:
+ if re.search(r'^Reverted ', line) is not None:
+ return True
+ return False
+
+ def get_revision(self):
+ '''Revision and URL of subversion working directory.'''
+ text = '\n'.join(self._exec(["info", self.dest]))
+ rev = re.search(self.REVISION_RE, text, re.MULTILINE)
+ if rev:
+ rev = rev.group(0)
+ else:
+ rev = 'Unable to get revision'
+
+ url = re.search(r'^URL\s?:.*$', text, re.MULTILINE)
+ if url:
+ url = url.group(0)
+ else:
+ url = 'Unable to get URL'
+
+ return rev, url
+
+ def get_remote_revision(self):
+ '''Revision of the remote repository.'''
+ text = '\n'.join(self._exec(["info", self.repo]))
+ rev = re.search(self.REVISION_RE, text, re.MULTILINE)
+ if rev:
+ rev = rev.group(0)
+ else:
+ rev = 'Unable to get remote revision'
+ return rev
+
+ def has_local_mods(self):
+ '''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''
+ lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest])
+ # The --quiet option will return only modified files.
+ # Match only revisioned files, i.e. ignore status '?'.
+ regex = re.compile(r'^[^?X]')
+ # Has local mods if more than 0 modified revisioned files.
+ return len(list(filter(regex.match, lines))) > 0
+
+ def needs_update(self):
+ curr, url = self.get_revision()
+ out2 = '\n'.join(self._exec(["info", "-r", self.revision, self.dest]))
+ head = re.search(self.REVISION_RE, out2, re.MULTILINE)
+ if head:
+ head = head.group(0)
+ else:
+ head = 'Unable to get revision'
+ rev1 = int(curr.split(':')[1].strip())
+ rev2 = int(head.split(':')[1].strip())
+ change = False
+ if rev1 < rev2:
+ change = True
+ return change, curr, head
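+ # For example (an illustrative sketch of the parsing above): with
+ # curr == 'Revision: 100' and head == 'Revision: 102', needs_update()
+ # returns (True, 'Revision: 100', 'Revision: 102').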
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path'),
+ repo=dict(type='str', required=True, aliases=['name', 'repository']),
+ revision=dict(type='str', default='HEAD', aliases=['rev', 'version']),
+ force=dict(type='bool', default=False),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ executable=dict(type='path'),
+ export=dict(type='bool', default=False),
+ checkout=dict(type='bool', default=True),
+ update=dict(type='bool', default=True),
+ switch=dict(type='bool', default=True),
+ in_place=dict(type='bool', default=False),
+ validate_certs=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ dest = module.params['dest']
+ repo = module.params['repo']
+ revision = module.params['revision']
+ force = module.params['force']
+ username = module.params['username']
+ password = module.params['password']
+ svn_path = module.params['executable'] or module.get_bin_path('svn', True)
+ export = module.params['export']
+ switch = module.params['switch']
+ checkout = module.params['checkout']
+ update = module.params['update']
+ in_place = module.params['in_place']
+ validate_certs = module.params['validate_certs']
+
+ # We screen-scrape a huge amount of svn command output, so use the best
+ # parsable locale anytime we call run_command()
+ locale = get_best_parsable_locale(module)
+ module.run_command_environ_update = dict(LANG=locale, LC_MESSAGES=locale)
+
+ if not dest and (checkout or update or export):
+ module.fail_json(msg="the destination directory must be specified unless checkout=no, update=no, and export=no")
+
+ svn = Subversion(module, dest, repo, revision, username, password, svn_path, validate_certs)
+
+ if not export and not update and not checkout:
+ module.exit_json(changed=False, after=svn.get_remote_revision())
+ if export or not os.path.exists(dest):
+ before = None
+ local_mods = False
+ if module.check_mode:
+ module.exit_json(changed=True)
+ elif not export and not checkout:
+ module.exit_json(changed=False)
+ if not export and checkout:
+ svn.checkout()
+ files_changed = True
+ else:
+ svn.export(force=force)
+ files_changed = True
+ elif svn.is_svn_repo():
+ # Order matters. Need to get local mods before switch to avoid false
+ # positives. Need to switch before revert to ensure we are reverting to
+ # correct repo.
+ if not update:
+ module.exit_json(changed=False)
+ if module.check_mode:
+ if svn.has_local_mods() and not force:
+ module.fail_json(msg="ERROR: modified files exist in the repository.")
+ check, before, after = svn.needs_update()
+ module.exit_json(changed=check, before=before, after=after)
+ files_changed = False
+ before = svn.get_revision()
+ local_mods = svn.has_local_mods()
+ if switch:
+ files_changed = svn.switch() or files_changed
+ if local_mods:
+ if force:
+ files_changed = svn.revert() or files_changed
+ else:
+ module.fail_json(msg="ERROR: modified files exist in the repository.")
+ files_changed = svn.update() or files_changed
+ elif in_place:
+ before = None
+ svn.checkout(force=True)
+ files_changed = True
+ local_mods = svn.has_local_mods()
+ if local_mods and force:
+ svn.revert()
+ else:
+ module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest,))
+
+ if export:
+ module.exit_json(changed=True)
+ else:
+ after = svn.get_revision()
+ changed = files_changed or local_mods
+ module.exit_json(changed=changed, before=before, after=after)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/systemd.py b/lib/ansible/modules/systemd.py
new file mode 100644
index 0000000..3580fa5
--- /dev/null
+++ b/lib/ansible/modules/systemd.py
@@ -0,0 +1,569 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: systemd_service
+author:
+ - Ansible Core Team
+version_added: "2.2"
+short_description: Manage systemd units
+description:
+ - Controls systemd units (services, timers, and so on) on remote hosts.
+options:
+ name:
+ description:
+ - Name of the unit. This parameter takes the name of exactly one unit to work with.
+ - When no extension is given, a C(.service) suffix is implied, matching systemd's own behaviour.
+ - When using the module in a chroot environment, you always need to specify the name of the unit with the extension. For example, C(crond.service).
+ type: str
+ aliases: [ service, unit ]
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the unit. C(reloaded) will always reload.
+ type: str
+ choices: [ reloaded, restarted, started, stopped ]
+ enabled:
+ description:
+ - Whether the unit should start on boot. B(At least one of state and enabled is required.)
+ type: bool
+ force:
+ description:
+ - Whether to override existing symlinks.
+ type: bool
+ version_added: 2.6
+ masked:
+ description:
+ - Whether the unit should be masked or not; a masked unit is impossible to start.
+ type: bool
+ daemon_reload:
+ description:
+ - Run daemon-reload before doing any other operations, to make sure systemd has read any changes.
+ - When set to C(true), runs daemon-reload even if the module does not start or stop anything.
+ type: bool
+ default: no
+ aliases: [ daemon-reload ]
+ daemon_reexec:
+ description:
+ - Run the daemon-reexec command before doing any other operations; the systemd manager will serialize the manager state.
+ type: bool
+ default: no
+ aliases: [ daemon-reexec ]
+ version_added: "2.8"
+ scope:
+ description:
+ - Run systemctl within a given service manager scope, either as the default system scope C(system),
+ the current user's scope C(user), or the scope of all users C(global).
+ - "For systemd to work with 'user', the executing user must have its own instance of dbus started and accessible (systemd requirement)."
+ - "The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
+ Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error."
+ - The user must have access, normally given via setting the C(XDG_RUNTIME_DIR) variable, see example below.
+
+ type: str
+ choices: [ system, user, global ]
+ default: system
+ version_added: "2.7"
+ no_block:
+ description:
+ - Do not synchronously wait for the requested operation to finish.
+ Enqueued job will continue without Ansible blocking on its completion.
+ type: bool
+ default: no
+ version_added: "2.3"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - Since 2.4, one of the following options is required: C(state), C(enabled), C(masked), C(daemon_reload), (C(daemon_reexec) since 2.8),
+ and all except C(daemon_reload) and (C(daemon_reexec) since 2.8) also require C(name).
+ - Before 2.4, you always required C(name).
+ - Globs are not supported in name, for example C(postgres*.service).
+ - The service names might vary by specific OS/distribution.
+requirements:
+ - A system managed by systemd.
+'''
+
+EXAMPLES = '''
+- name: Make sure a service unit is running
+ ansible.builtin.systemd:
+ state: started
+ name: httpd
+
+- name: Stop service cron on debian, if running
+ ansible.builtin.systemd:
+ name: cron
+ state: stopped
+
+- name: Restart service cron on centos, in all cases, and issue daemon-reload to pick up config changes
+ ansible.builtin.systemd:
+ state: restarted
+ daemon_reload: true
+ name: crond
+
+- name: Reload service httpd, in all cases
+ ansible.builtin.systemd:
+ name: httpd.service
+ state: reloaded
+
+- name: Enable service httpd and ensure it is not masked
+ ansible.builtin.systemd:
+ name: httpd
+ enabled: true
+ masked: no
+
+- name: Enable a timer unit for dnf-automatic
+ ansible.builtin.systemd:
+ name: dnf-automatic.timer
+ state: started
+ enabled: true
+
+- name: Just force systemd to reread configs (2.4 and above)
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Just force systemd to re-execute itself (2.8 and above)
+ ansible.builtin.systemd:
+ daemon_reexec: true
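+
+# masked is documented above; a small sketch (the unit name is illustrative):
+- name: Mask a unit so it cannot be started, even manually
+ ansible.builtin.systemd:
+ name: httpd
+ masked: true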
+
+- name: Run a user service when XDG_RUNTIME_DIR is not set on remote login
+ ansible.builtin.systemd:
+ name: myservice
+ state: started
+ scope: user
+ environment:
+ XDG_RUNTIME_DIR: "/run/user/{{ myuid }}"
+'''
+
+RETURN = '''
+status:
+ description: A dictionary with the key=value pairs returned from C(systemctl show).
+ returned: success
+ type: complex
+ sample: {
+ "ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ActiveEnterTimestampMonotonic": "8135942",
+ "ActiveExitTimestampMonotonic": "0",
+ "ActiveState": "active",
+ "After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
+ "AllowIsolate": "no",
+ "Before": "shutdown.target multi-user.target",
+ "BlockIOAccounting": "no",
+ "BlockIOWeight": "1000",
+ "CPUAccounting": "no",
+ "CPUSchedulingPolicy": "0",
+ "CPUSchedulingPriority": "0",
+ "CPUSchedulingResetOnFork": "no",
+ "CPUShares": "1024",
+ "CanIsolate": "no",
+ "CanReload": "yes",
+ "CanStart": "yes",
+ "CanStop": "yes",
+ "CapabilityBoundingSet": "18446744073709551615",
+ "ConditionResult": "yes",
+ "ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ConditionTimestampMonotonic": "7902742",
+ "Conflicts": "shutdown.target",
+ "ControlGroup": "/system.slice/crond.service",
+ "ControlPID": "0",
+ "DefaultDependencies": "yes",
+ "Delegate": "no",
+ "Description": "Command Scheduler",
+ "DevicePolicy": "auto",
+ "EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
+ "ExecMainCode": "0",
+ "ExecMainExitTimestampMonotonic": "0",
+ "ExecMainPID": "595",
+ "ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ExecMainStartTimestampMonotonic": "8134990",
+ "ExecMainStatus": "0",
+ "ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
+ "ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
+ "FragmentPath": "/usr/lib/systemd/system/crond.service",
+ "GuessMainPID": "yes",
+ "IOScheduling": "0",
+ "Id": "crond.service",
+ "IgnoreOnIsolate": "no",
+ "IgnoreOnSnapshot": "no",
+ "IgnoreSIGPIPE": "yes",
+ "InactiveEnterTimestampMonotonic": "0",
+ "InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "InactiveExitTimestampMonotonic": "8135942",
+ "JobTimeoutUSec": "0",
+ "KillMode": "process",
+ "KillSignal": "15",
+ "LimitAS": "18446744073709551615",
+ "LimitCORE": "18446744073709551615",
+ "LimitCPU": "18446744073709551615",
+ "LimitDATA": "18446744073709551615",
+ "LimitFSIZE": "18446744073709551615",
+ "LimitLOCKS": "18446744073709551615",
+ "LimitMEMLOCK": "65536",
+ "LimitMSGQUEUE": "819200",
+ "LimitNICE": "0",
+ "LimitNOFILE": "4096",
+ "LimitNPROC": "3902",
+ "LimitRSS": "18446744073709551615",
+ "LimitRTPRIO": "0",
+ "LimitRTTIME": "18446744073709551615",
+ "LimitSIGPENDING": "3902",
+ "LimitSTACK": "18446744073709551615",
+ "LoadState": "loaded",
+ "MainPID": "595",
+ "MemoryAccounting": "no",
+ "MemoryLimit": "18446744073709551615",
+ "MountFlags": "0",
+ "Names": "crond.service",
+ "NeedDaemonReload": "no",
+ "Nice": "0",
+ "NoNewPrivileges": "no",
+ "NonBlocking": "no",
+ "NotifyAccess": "none",
+ "OOMScoreAdjust": "0",
+ "OnFailureIsolate": "no",
+ "PermissionsStartOnly": "no",
+ "PrivateNetwork": "no",
+ "PrivateTmp": "no",
+ "RefuseManualStart": "no",
+ "RefuseManualStop": "no",
+ "RemainAfterExit": "no",
+ "Requires": "basic.target",
+ "Restart": "no",
+ "RestartUSec": "100ms",
+ "Result": "success",
+ "RootDirectoryStartOnly": "no",
+ "SameProcessGroup": "no",
+ "SecureBits": "0",
+ "SendSIGHUP": "no",
+ "SendSIGKILL": "yes",
+ "Slice": "system.slice",
+ "StandardError": "inherit",
+ "StandardInput": "null",
+ "StandardOutput": "journal",
+ "StartLimitAction": "none",
+ "StartLimitBurst": "5",
+ "StartLimitInterval": "10000000",
+ "StatusErrno": "0",
+ "StopWhenUnneeded": "no",
+ "SubState": "running",
+ "SyslogLevelPrefix": "yes",
+ "SyslogPriority": "30",
+ "TTYReset": "no",
+ "TTYVHangup": "no",
+ "TTYVTDisallocate": "no",
+ "TimeoutStartUSec": "1min 30s",
+ "TimeoutStopUSec": "1min 30s",
+ "TimerSlackNSec": "50000",
+ "Transient": "no",
+ "Type": "simple",
+ "UMask": "0022",
+ "UnitFileState": "enabled",
+ "WantedBy": "multi-user.target",
+ "Wants": "system.slice",
+ "WatchdogTimestampMonotonic": "0",
+ "WatchdogUSec": "0",
+ }
+''' # NOQA
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts.system.chroot import is_chroot
+from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
+from ansible.module_utils._text import to_native
+
+
+def is_running_service(service_status):
+ return service_status['ActiveState'] in set(['active', 'activating'])
+
+
+def is_deactivating_service(service_status):
+ return service_status['ActiveState'] in set(['deactivating'])
+
+
+def request_was_ignored(out):
+ return '=' not in out and ('ignoring request' in out or 'ignoring command' in out)
+
+
+def parse_systemctl_show(lines):
+ # The output of 'systemctl show' can contain values that span multiple lines. At first glance it
+ # appears that such values are always surrounded by {}, so the previous version of this code
+ # assumed that any value starting with { was a multi-line value; it would then consume lines
+ # until it saw a line that ended with }. However, it is possible to have a single-line value
+ # that starts with { but does not end with } (this could happen in the value for Description=,
+ # for example), and the previous version of this code would then consume all remaining lines as
+ # part of that value. Cryptically, this would lead to Ansible reporting that the service file
+ # couldn't be found.
+ #
+ # To avoid this issue, the following code only accepts multi-line values for keys whose names
+ # start with Exec (e.g., ExecStart=), since these are the only keys whose values are known to
+ # span multiple lines.
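+ # As a rough illustration (hypothetical input), the two physical lines
+ #   ExecStart={ path=/usr/sbin/foo ; argv[]=/usr/sbin/foo ;
+ #   status=0/0 }
+ # are folded back into a single 'ExecStart' entry, while a one-line value
+ # such as 'Description={ odd description' is stored as-is.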
+ parsed = {}
+ multival = []
+ k = None
+ for line in lines:
+ if k is None:
+ if '=' in line:
+ k, v = line.split('=', 1)
+ if k.startswith('Exec') and v.lstrip().startswith('{'):
+ if not v.rstrip().endswith('}'):
+ multival.append(v)
+ continue
+ parsed[k] = v.strip()
+ k = None
+ else:
+ multival.append(line)
+ if line.rstrip().endswith('}'):
+ parsed[k] = '\n'.join(multival).strip()
+ multival = []
+ k = None
+ return parsed
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ # initialize
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', aliases=['service', 'unit']),
+ state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ force=dict(type='bool'),
+ masked=dict(type='bool'),
+ daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
+ daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
+ scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
+ no_block=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
+ required_by=dict(
+ state=('name', ),
+ enabled=('name', ),
+ masked=('name', ),
+ ),
+ )
+
+ unit = module.params['name']
+ if unit is not None:
+ for globpattern in (r"*", r"?", r"["):
+ if globpattern in unit:
+ module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))
+
+ systemctl = module.get_bin_path('systemctl', True)
+
+ if os.getenv('XDG_RUNTIME_DIR') is None:
+ os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
+
+ # Set CLI options depending on params
+ # if scope is 'system' or None, we can ignore as there is no extra switch.
+ # The other choices match the corresponding switch
+ if module.params['scope'] != 'system':
+ systemctl += " --%s" % module.params['scope']
+
+ if module.params['no_block']:
+ systemctl += " --no-block"
+
+ if module.params['force']:
+ systemctl += " --force"
+
+ rc = 0
+ out = err = ''
+ result = dict(
+ name=unit,
+ changed=False,
+ status=dict(),
+ )
+
+ # Run daemon-reload first, if requested
+ if module.params['daemon_reload'] and not module.check_mode:
+ (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
+ if rc != 0:
+ module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
+
+ # Run daemon-reexec
+ if module.params['daemon_reexec'] and not module.check_mode:
+ (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
+ if rc != 0:
+ module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))
+
+ if unit:
+ found = False
+ is_initd = sysv_exists(unit)
+ is_systemd = False
+
+ # check service data; cannot error out on rc as it changes across versions, so assume not found
+ (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
+
+ if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
+ # load return of systemctl show into dictionary for easy access and return
+ if out:
+ result['status'] = parse_systemctl_show(to_native(out).split('\n'))
+
+ is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
+
+ is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'
+
+ # Check for loading error
+ if is_systemd and not is_masked and 'LoadError' in result['status']:
+ module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
+
+ # Workaround for https://github.com/ansible/ansible/issues/71528
+ elif err and rc == 1 and 'Failed to parse bus message' in err:
+ result['status'] = parse_systemctl_show(to_native(out).split('\n'))
+
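+ # Sketch of the template-unit fallback (hypothetical unit): for
+ # 'user@1000.service', partition('@') yields unit_base='user' and sep='@',
+ # so list-unit-files is queried with 'user@*' to find the template.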
+ unit_base, sep, suffix = unit.partition('@')
+ unit_search = '{unit_base}{sep}'.format(unit_base=unit_base, sep=sep)
+ (rc, out, err) = module.run_command("{systemctl} list-unit-files '{unit_search}*'".format(systemctl=systemctl, unit_search=unit_search))
+ is_systemd = unit_search in out
+
+ (rc, out, err) = module.run_command("{systemctl} is-active '{unit}'".format(systemctl=systemctl, unit=unit))
+ result['status']['ActiveState'] = out.rstrip('\n')
+
+ else:
+ # list taken from man systemctl(1) for systemd 244
+ valid_enabled_states = [
+ "enabled",
+ "enabled-runtime",
+ "linked",
+ "linked-runtime",
+ "masked",
+ "masked-runtime",
+ "static",
+ "indirect",
+ "disabled",
+ "generated",
+ "transient"]
+
+ (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
+ if out.strip() in valid_enabled_states:
+ is_systemd = True
+ else:
+ # fallback list-unit-files as show does not work on some systems (chroot)
+ # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
+ (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
+ if rc == 0:
+ is_systemd = True
+ else:
+ # Check for systemctl command
+ module.run_command(systemctl, check_rc=True)
+
+ # Does service exist?
+ found = is_systemd or is_initd
+ if is_initd and not is_systemd:
+ module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)
+
+ # mask/unmask the service, if requested, can operate on services before they are installed
+ if module.params['masked'] is not None:
+ # state is not masked unless systemd affirms otherwise
+ (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
+ masked = out.strip() == "masked"
+
+ if masked != module.params['masked']:
+ result['changed'] = True
+ if module.params['masked']:
+ action = 'mask'
+ else:
+ action = 'unmask'
+
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ # some versions of systemd CAN mask/unmask non-existing services; we only fail on missing if they don't
+ fail_if_missing(module, found, unit, msg='host')
+
+ # Enable/disable service startup at boot if requested
+ if module.params['enabled'] is not None:
+
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ fail_if_missing(module, found, unit, msg='host')
+
+ # do we need to enable the service?
+ enabled = False
+ (rc, out, err) = module.run_command("%s is-enabled '%s' -l" % (systemctl, unit))
+
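+ # 'is-enabled' prints a single word such as enabled, disabled, static,
+ # indirect, alias or masked; on most systemd versions only the enabled-like
+ # states exit with rc 0, which the branches below rely on.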
+ # check the systemctl result, or whether it is an init script
+ if rc == 0:
+ enabled = True
+ # If out is exactly one line reading 'indirect' or 'alias', the unit is an indirect/alias unit and is treated as disabled here
+ if out.splitlines() == ["indirect"] or out.splitlines() == ["alias"]:
+ enabled = False
+
+ elif rc == 1:
+ # if this is not a user or global scoped service and both an init script and a unit file exist, stdout should contain enabled/disabled; otherwise rely on rc
+ if module.params['scope'] == 'system' and \
+ is_initd and \
+ not out.strip().endswith('disabled') and \
+ sysv_is_enabled(unit):
+ enabled = True
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
+
+ result['enabled'] = not enabled
+
+ # set service state if requested
+ if module.params['state'] is not None:
+ fail_if_missing(module, found, unit, msg="host")
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # What is current service state?
+ if 'ActiveState' in result['status']:
+ action = None
+ if module.params['state'] == 'started':
+ if not is_running_service(result['status']):
+ action = 'start'
+ elif module.params['state'] == 'stopped':
+ if is_running_service(result['status']) or is_deactivating_service(result['status']):
+ action = 'stop'
+ else:
+ if not is_running_service(result['status']):
+ action = 'start'
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
+
+ if action:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
+ # check for chroot
+ elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
+ module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.")
+ else:
+ # this should not happen?
+ module.fail_json(msg="Service is in unknown state", status=result['status'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/systemd_service.py b/lib/ansible/modules/systemd_service.py
new file mode 100644
index 0000000..3580fa5
--- /dev/null
+++ b/lib/ansible/modules/systemd_service.py
@@ -0,0 +1,569 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: systemd_service
+author:
+ - Ansible Core Team
+version_added: "2.2"
+short_description: Manage systemd units
+description:
+ - Controls systemd units (services, timers, and so on) on remote hosts.
+options:
+ name:
+ description:
+ - Name of the unit. This parameter takes the name of exactly one unit to work with.
+ - When no extension is given, a C(.service) suffix is implied, as systemd itself assumes.
+ - When using the module in a chroot environment you always need to specify the name of the unit with the extension. For example, C(crond.service).
+ type: str
+ aliases: [ service, unit ]
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the unit. C(reloaded) will always reload.
+ type: str
+ choices: [ reloaded, restarted, started, stopped ]
+ enabled:
+ description:
+ - Whether the unit should start on boot. B(At least one of state and enabled is required.)
+ type: bool
+ force:
+ description:
+ - Whether to override existing symlinks.
+ type: bool
+ version_added: 2.6
+ masked:
+ description:
+ - Whether the unit should be masked or not. A masked unit is impossible to start.
+ type: bool
+ daemon_reload:
+ description:
+ - Run daemon-reload before doing any other operations, to make sure systemd has read any changes.
+ - When set to C(true), runs daemon-reload even if the module does not start or stop anything.
+ type: bool
+ default: no
+ aliases: [ daemon-reload ]
+ daemon_reexec:
+ description:
+ - Run the C(daemon-reexec) command before doing any other operations; the systemd manager will serialize its state, re-execute itself, and deserialize the state again.
+ type: bool
+ default: no
+ aliases: [ daemon-reexec ]
+ version_added: "2.8"
+ scope:
+ description:
+ - Run systemctl within a given service manager scope, either as the default system scope C(system),
+ the current user's scope C(user), or the scope of all users C(global).
+ - "For systemd to work with 'user', the executing user must have its own instance of dbus started and accessible (systemd requirement)."
+ - "The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
+ Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error."
+ - The user must have access, normally given via setting the C(XDG_RUNTIME_DIR) variable, see example below.
+
+ type: str
+ choices: [ system, user, global ]
+ default: system
+ version_added: "2.7"
+ no_block:
+ description:
+ - Do not synchronously wait for the requested operation to finish.
+ Enqueued job will continue without Ansible blocking on its completion.
+ type: bool
+ default: no
+ version_added: "2.3"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - Since 2.4, one of the following options is required: C(state), C(enabled), C(masked), C(daemon_reload), (C(daemon_reexec) since 2.8),
+ and all except C(daemon_reload) and (C(daemon_reexec) since 2.8) also require C(name).
+ - Before 2.4, C(name) was always required.
+ - Globs are not supported in C(name), for example C(postgres*.service).
+ - The service names might vary by specific OS/distribution.
+requirements:
+ - A system managed by systemd.
+'''
+
+EXAMPLES = '''
+- name: Make sure a service unit is running
+ ansible.builtin.systemd:
+ state: started
+ name: httpd
+
+- name: Stop service cron on debian, if running
+ ansible.builtin.systemd:
+ name: cron
+ state: stopped
+
+- name: Restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
+ ansible.builtin.systemd:
+ state: restarted
+ daemon_reload: true
+ name: crond
+
+- name: Reload service httpd, in all cases
+ ansible.builtin.systemd:
+ name: httpd.service
+ state: reloaded
+
+- name: Enable service httpd and ensure it is not masked
+ ansible.builtin.systemd:
+ name: httpd
+ enabled: true
+ masked: no
+
+- name: Enable a timer unit for dnf-automatic
+ ansible.builtin.systemd:
+ name: dnf-automatic.timer
+ state: started
+ enabled: true
+
+- name: Just force systemd to reread configs (2.4 and above)
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Just force systemd to re-execute itself (2.8 and above)
+ ansible.builtin.systemd:
+ daemon_reexec: true
+
+- name: Run a user service when XDG_RUNTIME_DIR is not set on remote login
+ ansible.builtin.systemd:
+ name: myservice
+ state: started
+ scope: user
+ environment:
+ XDG_RUNTIME_DIR: "/run/user/{{ myuid }}"
+'''
+
+RETURN = '''
+status:
+ description: A dictionary with the key=value pairs returned from C(systemctl show).
+ returned: success
+ type: complex
+ sample: {
+ "ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ActiveEnterTimestampMonotonic": "8135942",
+ "ActiveExitTimestampMonotonic": "0",
+ "ActiveState": "active",
+ "After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
+ "AllowIsolate": "no",
+ "Before": "shutdown.target multi-user.target",
+ "BlockIOAccounting": "no",
+ "BlockIOWeight": "1000",
+ "CPUAccounting": "no",
+ "CPUSchedulingPolicy": "0",
+ "CPUSchedulingPriority": "0",
+ "CPUSchedulingResetOnFork": "no",
+ "CPUShares": "1024",
+ "CanIsolate": "no",
+ "CanReload": "yes",
+ "CanStart": "yes",
+ "CanStop": "yes",
+ "CapabilityBoundingSet": "18446744073709551615",
+ "ConditionResult": "yes",
+ "ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ConditionTimestampMonotonic": "7902742",
+ "Conflicts": "shutdown.target",
+ "ControlGroup": "/system.slice/crond.service",
+ "ControlPID": "0",
+ "DefaultDependencies": "yes",
+ "Delegate": "no",
+ "Description": "Command Scheduler",
+ "DevicePolicy": "auto",
+ "EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
+ "ExecMainCode": "0",
+ "ExecMainExitTimestampMonotonic": "0",
+ "ExecMainPID": "595",
+ "ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ExecMainStartTimestampMonotonic": "8134990",
+ "ExecMainStatus": "0",
+ "ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
+ "ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
+ "FragmentPath": "/usr/lib/systemd/system/crond.service",
+ "GuessMainPID": "yes",
+ "IOScheduling": "0",
+ "Id": "crond.service",
+ "IgnoreOnIsolate": "no",
+ "IgnoreOnSnapshot": "no",
+ "IgnoreSIGPIPE": "yes",
+ "InactiveEnterTimestampMonotonic": "0",
+ "InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "InactiveExitTimestampMonotonic": "8135942",
+ "JobTimeoutUSec": "0",
+ "KillMode": "process",
+ "KillSignal": "15",
+ "LimitAS": "18446744073709551615",
+ "LimitCORE": "18446744073709551615",
+ "LimitCPU": "18446744073709551615",
+ "LimitDATA": "18446744073709551615",
+ "LimitFSIZE": "18446744073709551615",
+ "LimitLOCKS": "18446744073709551615",
+ "LimitMEMLOCK": "65536",
+ "LimitMSGQUEUE": "819200",
+ "LimitNICE": "0",
+ "LimitNOFILE": "4096",
+ "LimitNPROC": "3902",
+ "LimitRSS": "18446744073709551615",
+ "LimitRTPRIO": "0",
+ "LimitRTTIME": "18446744073709551615",
+ "LimitSIGPENDING": "3902",
+ "LimitSTACK": "18446744073709551615",
+ "LoadState": "loaded",
+ "MainPID": "595",
+ "MemoryAccounting": "no",
+ "MemoryLimit": "18446744073709551615",
+ "MountFlags": "0",
+ "Names": "crond.service",
+ "NeedDaemonReload": "no",
+ "Nice": "0",
+ "NoNewPrivileges": "no",
+ "NonBlocking": "no",
+ "NotifyAccess": "none",
+ "OOMScoreAdjust": "0",
+ "OnFailureIsolate": "no",
+ "PermissionsStartOnly": "no",
+ "PrivateNetwork": "no",
+ "PrivateTmp": "no",
+ "RefuseManualStart": "no",
+ "RefuseManualStop": "no",
+ "RemainAfterExit": "no",
+ "Requires": "basic.target",
+ "Restart": "no",
+ "RestartUSec": "100ms",
+ "Result": "success",
+ "RootDirectoryStartOnly": "no",
+ "SameProcessGroup": "no",
+ "SecureBits": "0",
+ "SendSIGHUP": "no",
+ "SendSIGKILL": "yes",
+ "Slice": "system.slice",
+ "StandardError": "inherit",
+ "StandardInput": "null",
+ "StandardOutput": "journal",
+ "StartLimitAction": "none",
+ "StartLimitBurst": "5",
+ "StartLimitInterval": "10000000",
+ "StatusErrno": "0",
+ "StopWhenUnneeded": "no",
+ "SubState": "running",
+ "SyslogLevelPrefix": "yes",
+ "SyslogPriority": "30",
+ "TTYReset": "no",
+ "TTYVHangup": "no",
+ "TTYVTDisallocate": "no",
+ "TimeoutStartUSec": "1min 30s",
+ "TimeoutStopUSec": "1min 30s",
+ "TimerSlackNSec": "50000",
+ "Transient": "no",
+ "Type": "simple",
+ "UMask": "0022",
+ "UnitFileState": "enabled",
+ "WantedBy": "multi-user.target",
+ "Wants": "system.slice",
+ "WatchdogTimestampMonotonic": "0",
+ "WatchdogUSec": "0",
+ }
+''' # NOQA
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts.system.chroot import is_chroot
+from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
+from ansible.module_utils._text import to_native
+
+
+def is_running_service(service_status):
+ return service_status['ActiveState'] in set(['active', 'activating'])
+
+
+def is_deactivating_service(service_status):
+ return service_status['ActiveState'] in set(['deactivating'])
+
+
+def request_was_ignored(out):
+ return '=' not in out and ('ignoring request' in out or 'ignoring command' in out)
+
+
+def parse_systemctl_show(lines):
+ # The output of 'systemctl show' can contain values that span multiple lines. At first glance it
+ # appears that such values are always surrounded by {}, so the previous version of this code
+ # assumed that any value starting with { was a multi-line value; it would then consume lines
+ # until it saw a line that ended with }. However, it is possible to have a single-line value
+ # that starts with { but does not end with } (this could happen in the value for Description=,
+ # for example), and the previous version of this code would then consume all remaining lines as
+ # part of that value. Cryptically, this would lead to Ansible reporting that the service file
+ # couldn't be found.
+ #
+ # To avoid this issue, the following code only accepts multi-line values for keys whose names
+ # start with Exec (e.g., ExecStart=), since these are the only keys whose values are known to
+ # span multiple lines.
+ parsed = {}
+ multival = []
+ k = None
+ for line in lines:
+ if k is None:
+ if '=' in line:
+ k, v = line.split('=', 1)
+ if k.startswith('Exec') and v.lstrip().startswith('{'):
+ if not v.rstrip().endswith('}'):
+ multival.append(v)
+ continue
+ parsed[k] = v.strip()
+ k = None
+ else:
+ multival.append(line)
+ if line.rstrip().endswith('}'):
+ parsed[k] = '\n'.join(multival).strip()
+ multival = []
+ k = None
+ return parsed
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ # initialize
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', aliases=['service', 'unit']),
+ state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ force=dict(type='bool'),
+ masked=dict(type='bool'),
+ daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
+ daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
+ scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
+ no_block=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
+ required_by=dict(
+ state=('name', ),
+ enabled=('name', ),
+ masked=('name', ),
+ ),
+ )
+
+ unit = module.params['name']
+ if unit is not None:
+ for globpattern in (r"*", r"?", r"["):
+ if globpattern in unit:
+ module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))
+
+ systemctl = module.get_bin_path('systemctl', True)
+
+ if os.getenv('XDG_RUNTIME_DIR') is None:
+ os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
+
+ # Set CLI options depending on params
+ # if scope is 'system' or None, we can ignore as there is no extra switch.
+ # The other choices match the corresponding switch
+ if module.params['scope'] != 'system':
+ systemctl += " --%s" % module.params['scope']
+
+ if module.params['no_block']:
+ systemctl += " --no-block"
+
+ if module.params['force']:
+ systemctl += " --force"
+
+ rc = 0
+ out = err = ''
+ result = dict(
+ name=unit,
+ changed=False,
+ status=dict(),
+ )
+
+ # Run daemon-reload first, if requested
+ if module.params['daemon_reload'] and not module.check_mode:
+ (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
+ if rc != 0:
+ module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
+
+ # Run daemon-reexec
+ if module.params['daemon_reexec'] and not module.check_mode:
+ (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
+ if rc != 0:
+ module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))
+
+ if unit:
+ found = False
+ is_initd = sysv_exists(unit)
+ is_systemd = False
+
+ # check service data; cannot error out on rc as it changes across versions, so assume not found
+ (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
+
+ if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
+ # load return of systemctl show into dictionary for easy access and return
+ if out:
+ result['status'] = parse_systemctl_show(to_native(out).split('\n'))
+
+ is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
+
+ is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'
+
+ # Check for loading error
+ if is_systemd and not is_masked and 'LoadError' in result['status']:
+ module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
+
+ # Workaround for https://github.com/ansible/ansible/issues/71528
+ elif err and rc == 1 and 'Failed to parse bus message' in err:
+ result['status'] = parse_systemctl_show(to_native(out).split('\n'))
+
+ unit_base, sep, suffix = unit.partition('@')
+ unit_search = '{unit_base}{sep}'.format(unit_base=unit_base, sep=sep)
+ (rc, out, err) = module.run_command("{systemctl} list-unit-files '{unit_search}*'".format(systemctl=systemctl, unit_search=unit_search))
+ is_systemd = unit_search in out
+
+ (rc, out, err) = module.run_command("{systemctl} is-active '{unit}'".format(systemctl=systemctl, unit=unit))
+ result['status']['ActiveState'] = out.rstrip('\n')
+
+ else:
+ # list taken from man systemctl(1) for systemd 244
+ valid_enabled_states = [
+ "enabled",
+ "enabled-runtime",
+ "linked",
+ "linked-runtime",
+ "masked",
+ "masked-runtime",
+ "static",
+ "indirect",
+ "disabled",
+ "generated",
+ "transient"]
+
+ (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
+ if out.strip() in valid_enabled_states:
+ is_systemd = True
+ else:
+ # fallback list-unit-files as show does not work on some systems (chroot)
+ # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
+ (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
+ if rc == 0:
+ is_systemd = True
+ else:
+ # Check for systemctl command
+ module.run_command(systemctl, check_rc=True)
+
+ # Does service exist?
+ found = is_systemd or is_initd
+ if is_initd and not is_systemd:
+ module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)
+
+ # mask/unmask the service, if requested, can operate on services before they are installed
+ if module.params['masked'] is not None:
+ # state is not masked unless systemd affirms otherwise
+ (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
+ masked = out.strip() == "masked"
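+ # note: for a masked unit 'is-enabled' exits non-zero yet still prints
+ # 'masked' on stdout, hence the string comparison instead of checking rc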
+
+ if masked != module.params['masked']:
+ result['changed'] = True
+ if module.params['masked']:
+ action = 'mask'
+ else:
+ action = 'unmask'
+
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ # some versions of systemd CAN mask/unmask non-existing services; we only fail on missing if they don't
+ fail_if_missing(module, found, unit, msg='host')
+
+ # Enable/disable service startup at boot if requested
+ if module.params['enabled'] is not None:
+
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ fail_if_missing(module, found, unit, msg='host')
+
+ # do we need to enable the service?
+ enabled = False
+ (rc, out, err) = module.run_command("%s is-enabled '%s' -l" % (systemctl, unit))
+
+ # check the systemctl result, or whether it is an init script
+ if rc == 0:
+ enabled = True
+ # If out is exactly one line reading 'indirect' or 'alias', the unit is an indirect/alias unit and is treated as disabled here
+ if out.splitlines() == ["indirect"] or out.splitlines() == ["alias"]:
+ enabled = False
+
+ elif rc == 1:
+ # if this is not a user or global scoped service and both an init script and a unit file exist, stdout should contain enabled/disabled; otherwise rely on rc
+ if module.params['scope'] == 'system' and \
+ is_initd and \
+ not out.strip().endswith('disabled') and \
+ sysv_is_enabled(unit):
+ enabled = True
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
+
+ result['enabled'] = not enabled
+
+ # set service state if requested
+ if module.params['state'] is not None:
+ fail_if_missing(module, found, unit, msg="host")
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # What is current service state?
+ if 'ActiveState' in result['status']:
+ action = None
+ if module.params['state'] == 'started':
+ if not is_running_service(result['status']):
+ action = 'start'
+ elif module.params['state'] == 'stopped':
+ if is_running_service(result['status']) or is_deactivating_service(result['status']):
+ action = 'stop'
+ else:
+ if not is_running_service(result['status']):
+ action = 'start'
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
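+ # e.g. state=restarted maps to action='restart' and state=reloaded to
+ # action='reload'; either way the unit ends up 'started'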
+
+ if action:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
+ # check for chroot
+ elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
+ module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.")
+ else:
+ # this should not happen?
+ module.fail_json(msg="Service is in unknown state", status=result['status'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/sysvinit.py b/lib/ansible/modules/sysvinit.py
new file mode 100644
index 0000000..b3b9c10
--- /dev/null
+++ b/lib/ansible/modules/sysvinit.py
@@ -0,0 +1,364 @@
+# -*- coding: utf-8 -*-
+# (c) 2017, Brian Coca <bcoca@ansible.com>
+# (c) 2017, Adam Miller <admiller@redhat.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: sysvinit
+author:
+ - "Ansible Core Team"
+version_added: "2.6"
+short_description: Manage SysV services
+description:
+ - Controls services on target hosts that use the SysV init system.
+options:
+ name:
+ required: true
+ description:
+ - Name of the service.
+ type: str
+ aliases: ['service']
+ state:
+ choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ Not all init scripts support C(restarted) or C(reloaded) natively, so these will both trigger a stop and start as needed.
+ type: str
+ enabled:
+ type: bool
+ description:
+ - Whether the service should start on boot. B(At least one of state and enabled is required.)
+ sleep:
+ default: 1
+ description:
+ - If the service is being C(restarted) or C(reloaded) then sleep this many seconds between the stop and start commands.
+ This helps to work around badly behaving services.
+ type: int
+ pattern:
+ description:
+ - A substring to look for as would be found in the output of the I(ps) command as a stand-in for a status result.
+ - If the string is found, the service will be assumed to be running.
+ - "This option is mainly for use with init scripts that don't support the 'status' option."
+ type: str
+ runlevels:
+ description:
+ - The runlevels this script should be enabled/disabled from.
+ - Use this to override the defaults set by the package or init script itself.
+ type: list
+ elements: str
+ arguments:
+ description:
+ - Additional arguments provided on the command line that some init scripts accept.
+ type: str
+ aliases: [ 'args' ]
+ daemonize:
+ type: bool
+ description:
+ - Have the module daemonize as the service itself might not do so properly.
+ - This is useful with badly written init scripts or daemons. The problem
+ commonly manifests as the task hanging because the service still holds
+ the tty, or as the service dying when the task finishes because the
+ connection closes the session.
+ default: no
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - One option other than name is required.
+ - The service names might vary by specific OS/distribution.
+requirements:
+ - That the service managed has a corresponding init script.
+'''
+
+EXAMPLES = '''
+- name: Make sure apache2 is started
+ ansible.builtin.sysvinit:
+ name: apache2
+ state: started
+ enabled: yes
+
+- name: Make sure apache2 is started on runlevels 3 and 5
+ ansible.builtin.sysvinit:
+ name: apache2
+ state: started
+ enabled: yes
+ runlevels:
+ - 3
+ - 5
+'''
+
+RETURN = r'''
+results:
+ description: results from actions taken
+ returned: always
+ type: complex
+ sample: {
+ "attempts": 1,
+ "changed": true,
+ "name": "apache2",
+ "status": {
+ "enabled": {
+ "changed": true,
+ "rc": 0,
+ "stderr": "",
+ "stdout": ""
+ },
+ "stopped": {
+ "changed": true,
+ "rc": 0,
+ "stderr": "",
+ "stdout": "Stopping web server: apache2.\n"
+ }
+ }
+ }
+'''
+
+import re
+from time import sleep
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.service import sysv_is_enabled, get_sysv_script, sysv_exists, fail_if_missing, get_ps, daemonize
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str', aliases=['service']),
+ state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
+ enabled=dict(type='bool'),
+ sleep=dict(type='int', default=1),
+ pattern=dict(type='str'),
+ arguments=dict(type='str', aliases=['args']),
+ runlevels=dict(type='list', elements='str'),
+ daemonize=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+ name = module.params['name']
+ action = module.params['state']
+ enabled = module.params['enabled']
+ runlevels = module.params['runlevels']
+ pattern = module.params['pattern']
+ sleep_for = module.params['sleep']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': name,
+ 'changed': False,
+ 'status': {}
+ }
+
+ # ensure service exists, get script name
+ fail_if_missing(module, sysv_exists(name), name)
+ script = get_sysv_script(name)
+
+ # locate binaries for service management
+ paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
+ binaries = ['chkconfig', 'update-rc.d', 'insserv', 'service']
+
+ # Keeps track of the service status for various runlevels because we can
+ # operate on multiple runlevels at once
+ runlevel_status = {}
+
+ location = {}
+ for binary in binaries:
+ location[binary] = module.get_bin_path(binary, opt_dirs=paths)
+
+ # figure out enable status
+ if runlevels:
+ for rl in runlevels:
+ runlevel_status.setdefault(rl, {})
+ runlevel_status[rl]["enabled"] = sysv_is_enabled(name, runlevel=rl)
+ else:
+ runlevel_status["enabled"] = sysv_is_enabled(name)
+
+ # figure out started status, everyone does it differently!
+ is_started = False
+ worked = False
+
+ # user knows other methods fail and supplied pattern
+ if pattern:
+ worked = is_started = get_ps(module, pattern)
+ else:
+ if location.get('service'):
+ # standard tool that has been 'de-standardized' by reimplementation in other OS/distros
+ cmd = '%s %s status' % (location['service'], name)
+ elif script:
+ # maybe script implements status (not LSB)
+ cmd = '%s status' % script
+ else:
+ module.fail_json(msg="Unable to determine service status")
+
+ (rc, out, err) = module.run_command(cmd)
+ if rc != -1:
+ # special case
+ if name == 'iptables' and "ACCEPT" in out:
+ worked = True
+ is_started = True
+
+ # check output messages, messy but sadly more reliable than rc
+ if not worked and out.count('\n') <= 1:
+
+ cleanout = out.lower().replace(name.lower(), '')
+
+ for stopped in ['stop', 'is dead ', 'dead but ', 'could not access pid file', 'inactive']:
+ if stopped in cleanout:
+ worked = True
+ break
+
+ if not worked:
+ for started_status in ['run', 'start', 'active']:
+ if started_status in cleanout and "not " not in cleanout:
+ is_started = True
+ worked = True
+ break
+
+ # hope rc is not lying to us; check commonly used 'bad' return codes
+ if not worked and rc in [1, 2, 3, 4, 69]:
+ worked = True
+
+ if not worked:
+ # hail mary
+ if rc == 0:
+ is_started = True
+ worked = True
+ # ps for luck, can only assure positive match
+ elif get_ps(module, name):
+ is_started = True
+ worked = True
+ module.warn("Used ps output to match service name and determine it is up, this is very unreliable")
+
+ if not worked:
+ module.warn("Unable to determine if service is up, assuming it is down")
+
+ ###########################################################################
+ # BEGIN: Enable/Disable
+ result['status'].setdefault('enabled', {})
+ result['status']['enabled']['changed'] = False
+ result['status']['enabled']['rc'] = None
+ result['status']['enabled']['stdout'] = None
+ result['status']['enabled']['stderr'] = None
+ if runlevels:
+ result['status']['enabled']['runlevels'] = runlevels
+ for rl in runlevels:
+ if enabled != runlevel_status[rl]["enabled"]:
+ result['changed'] = True
+ result['status']['enabled']['changed'] = True
+
+ if not module.check_mode and result['changed']:
+ # Perform enable/disable here
+ if enabled:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s enable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s --level %s %s on" % (location['chkconfig'], ''.join(runlevels), name))
+ else:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s disable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s --level %s %s off" % (location['chkconfig'], ''.join(runlevels), name))
+ else:
+ if enabled is not None and enabled != runlevel_status["enabled"]:
+ result['changed'] = True
+ result['status']['enabled']['changed'] = True
+
+ if not module.check_mode and result['changed']:
+ # Perform enable/disable here
+ if enabled:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s defaults" % (location['update-rc.d'], name))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s %s on" % (location['chkconfig'], name))
+ else:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s disable" % (location['update-rc.d'], name))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s %s off" % (location['chkconfig'], name))
+
+ # Assigned above, might be useful if something goes sideways
+ if not module.check_mode and result['status']['enabled']['changed']:
+ result['status']['enabled']['rc'] = rc
+ result['status']['enabled']['stdout'] = out
+ result['status']['enabled']['stderr'] = err
+ rc, out, err = None, None, None
+
+ if "illegal runlevel specified" in result['status']['enabled']['stderr']:
+ module.fail_json(msg="Illegal runlevel specified for enable operation on service %s" % name, **result)
+ # END: Enable/Disable
+ ###########################################################################
+
+ ###########################################################################
+ # BEGIN: state
+ result['status'].setdefault(module.params['state'], {})
+ result['status'][module.params['state']]['changed'] = False
+ result['status'][module.params['state']]['rc'] = None
+ result['status'][module.params['state']]['stdout'] = None
+ result['status'][module.params['state']]['stderr'] = None
+ if action:
+ action = re.sub(r'p?ed$', '', action.lower())
+
+ def runme(doit):
+
+ args = module.params['arguments']
+ cmd = "%s %s %s" % (script, doit, "" if args is None else args)
+
+ # how to run
+ if module.params['daemonize']:
+ (rc, out, err) = daemonize(module, cmd)
+ else:
+ (rc, out, err) = module.run_command(cmd)
+ # FIXME: ERRORS
+
+ if rc != 0:
+ module.fail_json(msg="Failed to %s service: %s" % (action, name), rc=rc, stdout=out, stderr=err)
+
+ return (rc, out, err)
+
+ if action == 'restart':
+ result['changed'] = True
+ result['status'][module.params['state']]['changed'] = True
+ if not module.check_mode:
+
+ # cannot rely on existing 'restart' in init script
+ for dothis in ['stop', 'start']:
+ (rc, out, err) = runme(dothis)
+ if sleep_for:
+ sleep(sleep_for)
+
+ elif is_started != (action == 'start'):
+ result['changed'] = True
+ result['status'][module.params['state']]['changed'] = True
+ if not module.check_mode:
+ rc, out, err = runme(action)
+
+ elif is_started == (action == 'stop'):
+ result['changed'] = True
+ result['status'][module.params['state']]['changed'] = True
+ if not module.check_mode:
+ rc, out, err = runme(action)
+
+ if not module.check_mode and result['status'][module.params['state']]['changed']:
+ result['status'][module.params['state']]['rc'] = rc
+ result['status'][module.params['state']]['stdout'] = out
+ result['status'][module.params['state']]['stderr'] = err
+ rc, out, err = None, None, None
+ # END: state
+ ###########################################################################
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/tempfile.py b/lib/ansible/modules/tempfile.py
new file mode 100644
index 0000000..10594de
--- /dev/null
+++ b/lib/ansible/modules/tempfile.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Krzysztof Magosa <krzysztof@magosa.pl>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: tempfile
+version_added: "2.3"
+short_description: Creates temporary files and directories
+description:
+ - The C(tempfile) module creates temporary files and directories. The C(mktemp) command takes different parameters on various systems; this module helps
+ to avoid troubles related to that. Files and directories created by the module are accessible only to the creator. In case you need to make them
+ world-accessible, use the M(ansible.builtin.file) module.
+ - For Windows targets, use the M(ansible.windows.win_tempfile) module instead.
+options:
+ state:
+ description:
+ - Whether to create file or directory.
+ type: str
+ choices: [ directory, file ]
+ default: file
+ path:
+ description:
+ - Location where temporary file or directory should be created.
+ - If path is not specified, the default system temporary directory will be used.
+ type: path
+ prefix:
+ description:
+ - Prefix of the file/directory name created by the module.
+ type: str
+ default: ansible.
+ suffix:
+ description:
+ - Suffix of the file/directory name created by the module.
+ type: str
+ default: ""
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+seealso:
+- module: ansible.builtin.file
+- module: ansible.windows.win_tempfile
+author:
+ - Krzysztof Magosa (@krzysztof-magosa)
+'''
+
+EXAMPLES = """
+- name: Create temporary build directory
+ ansible.builtin.tempfile:
+ state: directory
+ suffix: build
+
+- name: Create temporary file
+ ansible.builtin.tempfile:
+ state: file
+ suffix: temp
+ register: tempfile_1
+
+- name: Use the registered var and the file module to remove the temporary file
+ ansible.builtin.file:
+ path: "{{ tempfile_1.path }}"
+ state: absent
+ when: tempfile_1.path is defined
+"""
+
+RETURN = '''
+path:
+ description: Path to created file or directory.
+ returned: success
+ type: str
+ sample: "/tmp/ansible.bMlvdk"
+'''
+
+from os import close
+from tempfile import mkstemp, mkdtemp
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='file', choices=['file', 'directory']),
+ path=dict(type='path'),
+ prefix=dict(type='str', default='ansible.'),
+ suffix=dict(type='str', default=''),
+ ),
+ )
+
+ try:
+ if module.params['state'] == 'file':
+ handle, path = mkstemp(
+ prefix=module.params['prefix'],
+ suffix=module.params['suffix'],
+ dir=module.params['path'],
+ )
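+ # mkstemp returns an already-open OS-level file descriptor alongside
+ # the path; close it so the module does not leak the descriptor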
+ close(handle)
+ else:
+ path = mkdtemp(
+ prefix=module.params['prefix'],
+ suffix=module.params['suffix'],
+ dir=module.params['path'],
+ )
+
+ module.exit_json(changed=True, path=path)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/template.py b/lib/ansible/modules/template.py
new file mode 100644
index 0000000..7ee581a
--- /dev/null
+++ b/lib/ansible/modules/template.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: template
+version_added: historical
+short_description: Template a file out to a target host
+options:
+ follow:
+ description:
+ - Determine whether symbolic links should be followed.
+ - When set to C(true) symbolic links will be followed, if they exist.
+ - When set to C(false) symbolic links will not be followed.
+ - Prior to Ansible 2.4, this was hardcoded as C(true).
+ type: bool
+ default: no
+ version_added: '2.4'
+notes:
+- For Windows you can use M(ansible.windows.win_template) which uses C(\r\n) as C(newline_sequence) by default.
+- The C(jinja2_native) setting has no effect. Native types are never used in the C(template) module, which is by design used for generating text files.
+ For working with templates and utilizing Jinja2 native types see the C(jinja2_native) parameter of the C(template lookup).
+seealso:
+- module: ansible.builtin.copy
+- module: ansible.windows.win_copy
+- module: ansible.windows.win_template
+author:
+- Ansible Core Team
+- Michael DeHaan
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.flow
+- action_common_attributes.files
+- backup
+- files
+- template_common
+- validate
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: full
+ vault:
+ support: full
+'''
+
+EXAMPLES = r'''
+- name: Template a file to /etc/file.conf
+ ansible.builtin.template:
+ src: /mytemplates/foo.j2
+ dest: /etc/file.conf
+ owner: bin
+ group: wheel
+ mode: '0644'
+
+- name: Template a file, using symbolic modes (equivalent to 0644)
+ ansible.builtin.template:
+ src: /mytemplates/foo.j2
+ dest: /etc/file.conf
+ owner: bin
+ group: wheel
+ mode: u=rw,g=r,o=r
+
+- name: Copy a version of named.conf that is dependent on the OS; the setype was obtained by running ls -Z /etc/named.conf on the original file
+ ansible.builtin.template:
+ src: named.conf_{{ ansible_os_family }}.j2
+ dest: /etc/named.conf
+ group: named
+ setype: named_conf_t
+ mode: '0640'
+
+- name: Create a DOS-style text file from a template
+ ansible.builtin.template:
+ src: config.ini.j2
+ dest: /share/windows/config.ini
+ newline_sequence: '\r\n'
+
+- name: Copy a new sudoers file into place, after passing validation with visudo
+ ansible.builtin.template:
+ src: /mine/sudoers
+ dest: /etc/sudoers
+ validate: /usr/sbin/visudo -cf %s
+
+- name: Update sshd configuration safely, avoid locking yourself out
+ ansible.builtin.template:
+ src: etc/ssh/sshd_config.j2
+ dest: /etc/ssh/sshd_config
+ owner: root
+ group: root
+ mode: '0600'
+ validate: /usr/sbin/sshd -t -f %s
+ backup: yes
+'''
diff --git a/lib/ansible/modules/unarchive.py b/lib/ansible/modules/unarchive.py
new file mode 100644
index 0000000..26890b5
--- /dev/null
+++ b/lib/ansible/modules/unarchive.py
@@ -0,0 +1,1115 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
+# Copyright: (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright: (c) 2016, Dag Wieers <dag@wieers.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: unarchive
+version_added: '1.4'
+short_description: Unpacks an archive after (optionally) copying it from the local machine
+description:
+ - The C(unarchive) module unpacks an archive. It will not unpack a compressed file that does not contain an archive.
+ - By default, it will copy the source file from the local system to the target before unpacking.
+ - Set C(remote_src=yes) to unpack an archive which already exists on the target.
+ - If checksum validation is desired, use M(ansible.builtin.get_url) or M(ansible.builtin.uri) instead to fetch the file and set C(remote_src=yes).
+ - For Windows targets, use the M(community.windows.win_unzip) module instead.
+options:
+ src:
+ description:
+ - If C(remote_src=no) (default), local path to archive file to copy to the target server; can be absolute or relative. If C(remote_src=yes), path on the
+ target server to existing archive file to unpack.
+ - If C(remote_src=yes) and C(src) contains C(://), the remote machine will download the file from the URL first. (version_added 2.0). This is only for
+ simple cases, for full download support use the M(ansible.builtin.get_url) module.
+ type: path
+ required: true
+ dest:
+ description:
+ - Remote absolute path where the archive should be unpacked.
+ - The given path must exist. Base directory is not created by this module.
+ type: path
+ required: true
+ copy:
+ description:
+ - If C(true), the file is copied from the local controller to the managed (remote) node; otherwise, the module will look for the C(src) archive on the managed machine.
+ - This option has been deprecated in favor of C(remote_src).
+ - This option is mutually exclusive with C(remote_src).
+ type: bool
+ default: yes
+ creates:
+ description:
+ - If the specified absolute path (file or directory) already exists, this step will B(not) be run.
+ - The specified absolute path (file or directory) must be below the base path given with C(dest:).
+ type: path
+ version_added: "1.6"
+ io_buffer_size:
+ description:
+ - Size of the volatile memory buffer that is used for extracting files from the archive in bytes.
+ type: int
+ default: 65536
+ version_added: "2.12"
+ list_files:
+ description:
+ - If set to True, return the list of files that are contained in the tarball.
+ type: bool
+ default: no
+ version_added: "2.0"
+ exclude:
+ description:
+ - List the directory and file entries that you would like to exclude from the unarchive action.
+ - Mutually exclusive with C(include).
+ type: list
+ default: []
+ elements: str
+ version_added: "2.1"
+ include:
+ description:
+ - List of directory and file entries that you would like to extract from the archive. If C(include)
+ is not empty, only files listed here will be extracted.
+ - Mutually exclusive with C(exclude).
+ type: list
+ default: []
+ elements: str
+ version_added: "2.11"
+ keep_newer:
+ description:
+ - Do not replace existing files that are newer than files from the archive.
+ type: bool
+ default: no
+ version_added: "2.1"
+ extra_opts:
+ description:
+ - Specify additional options by passing in an array.
+ - Each space-separated command-line option should be a new element of the array. See examples.
+ - Command-line options with multiple elements must use multiple lines in the array, one for each element.
+ type: list
+ elements: str
+ default: ""
+ version_added: "2.1"
+ remote_src:
+ description:
+ - Set to C(true) to indicate the archived file is already on the remote system and not local to the Ansible controller.
+ - This option is mutually exclusive with C(copy).
+ type: bool
+ default: no
+ version_added: "2.2"
+ validate_certs:
+ description:
+ - This only applies if using a https URL as the source of the file.
+ - This should only be set to C(false) when used on personally controlled sites using a self-signed certificate.
+ - Prior to 2.2 the code worked as if this was set to C(true).
+ type: bool
+ default: yes
+ version_added: "2.2"
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.flow
+- action_common_attributes.files
+- decrypt
+- files
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: partial
+ details: Not supported for gzipped tar files.
+ diff_mode:
+ support: partial
+ details: Uses gtar's C(--diff) arg to calculate if changed or not. If this C(arg) is not supported, it will always unpack the archive.
+ platform:
+ platforms: posix
+ safe_file_operations:
+ support: none
+ vault:
+ support: full
+todo:
+ - Re-implement tar support using native tarfile module.
+ - Re-implement zip support using native zipfile module.
+notes:
+ - Requires C(zipinfo) and C(gtar)/C(unzip) command on target host.
+ - Requires C(zstd) command on target host to expand I(.tar.zst) files.
+ - Can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2), I(.tar.xz), and I(.tar.zst) files using C(gtar).
+ - Does not handle I(.gz), I(.bz2), I(.xz), or I(.zst) files that do not contain a I(.tar) archive.
+ - Existing files/directories in the destination which are not in the archive
+ are not touched. This is the same behavior as a normal archive extraction.
+ - Existing files/directories in the destination which are not in the archive
+ are ignored for purposes of deciding if the archive should be unpacked or not.
+seealso:
+- module: community.general.archive
+- module: community.general.iso_extract
+- module: community.windows.win_unzip
+author: Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Extract foo.tgz into /var/lib/foo
+ ansible.builtin.unarchive:
+ src: foo.tgz
+ dest: /var/lib/foo
+
+- name: Unarchive a file that is already on the remote machine
+ ansible.builtin.unarchive:
+ src: /tmp/foo.zip
+ dest: /usr/local/bin
+ remote_src: yes
+
+- name: Unarchive a file that needs to be downloaded (added in 2.0)
+ ansible.builtin.unarchive:
+ src: https://example.com/example.zip
+ dest: /usr/local/bin
+ remote_src: yes
+
+- name: Unarchive a file with extra options
+ ansible.builtin.unarchive:
+ src: /tmp/foo.zip
+ dest: /usr/local/bin
+ extra_opts:
+ - --transform
+ - s/^xxx/yyy/
+'''
+
+RETURN = r'''
+dest:
+ description: Path to the destination directory.
+ returned: always
+ type: str
+ sample: /opt/software
+files:
+ description: List of all the files in the archive.
+ returned: When I(list_files) is True
+ type: list
+ sample: '["file1", "file2"]'
+gid:
+ description: Numerical ID of the group that owns the destination directory.
+ returned: always
+ type: int
+ sample: 1000
+group:
+ description: Name of the group that owns the destination directory.
+ returned: always
+ type: str
+ sample: "librarians"
+handler:
+ description: Archive software handler used to extract and decompress the archive.
+ returned: always
+ type: str
+ sample: "TgzArchive"
+mode:
+ description: String that represents the octal permissions of the destination directory.
+ returned: always
+ type: str
+ sample: "0755"
+owner:
+ description: Name of the user that owns the destination directory.
+ returned: always
+ type: str
+ sample: "paul"
+size:
+ description: The size of destination directory in bytes. Does not include the size of files or subdirectories contained within.
+ returned: always
+ type: int
+ sample: 36
+src:
+ description:
+ - The source archive's path.
+ - If I(src) was a remote web URL, or a file from the local Ansible controller, this shows the temporary location where the download was stored.
+ returned: always
+ type: str
+ sample: "/home/paul/test.tar.gz"
+state:
+ description: State of the destination. Effectively always "directory".
+ returned: always
+ type: str
+ sample: "directory"
+uid:
+ description: Numerical ID of the user that owns the destination directory.
+ returned: always
+ type: int
+ sample: 1000
+'''
+
+import binascii
+import codecs
+import datetime
+import fnmatch
+import grp
+import os
+import platform
+import pwd
+import re
+import stat
+import time
+import traceback
+from functools import partial
+from zipfile import ZipFile, BadZipfile
+
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.urls import fetch_file
+
+try: # python 3.3+
+ from shlex import quote # type: ignore[attr-defined]
+except ImportError: # older python
+ from pipes import quote
+
+# String from tar that shows the tar contents are different from the
+# filesystem
+OWNER_DIFF_RE = re.compile(r': Uid differs$')
+GROUP_DIFF_RE = re.compile(r': Gid differs$')
+MODE_DIFF_RE = re.compile(r': Mode differs$')
+MOD_TIME_DIFF_RE = re.compile(r': Mod time differs$')
+# NEWER_DIFF_RE = re.compile(r' is newer or same age.$')
+EMPTY_FILE_RE = re.compile(r': : Warning: Cannot stat: No such file or directory$')
+MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')
+ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][SsTtx-]){3}')
+INVALID_OWNER_RE = re.compile(r': Invalid owner')
+INVALID_GROUP_RE = re.compile(r': Invalid group')
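+# Illustrative (assumed) GNU tar --diff output lines that these patterns match:
+#   etc/motd: Mode differs
+#   etc/motd: Uid differs
+#   var/cache/: Warning: Cannot stat: No such file or directory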
+
+
+def crc32(path, buffer_size):
+ ''' Return a CRC32 checksum of a file '''
+
+ crc = binascii.crc32(b'')
+ with open(path, 'rb') as f:
+ for b_block in iter(partial(f.read, buffer_size), b''):
+ crc = binascii.crc32(b_block, crc)
+ return crc & 0xffffffff
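+# A usage sketch (path assumed): crc32('/etc/hosts', 64 * 1024) streams the
+# file in fixed-size blocks and returns the same unsigned 32-bit value as
+# binascii.crc32 over the whole file, masked with 0xffffffff.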
+
+
+def shell_escape(string):
+ ''' Quote meta-characters in the args for the unix shell '''
+ return re.sub(r'([^A-Za-z0-9_])', r'\\\1', string)
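+# Illustrative: shell_escape('foo bar$baz') returns foo\ bar\$baz, escaping
+# every character outside [A-Za-z0-9_].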
+
+
+class UnarchiveError(Exception):
+ pass
+
+
+class ZipArchive(object):
+
+ def __init__(self, src, b_dest, file_args, module):
+ self.src = src
+ self.b_dest = b_dest
+ self.file_args = file_args
+ self.opts = module.params['extra_opts']
+ self.module = module
+ self.io_buffer_size = module.params["io_buffer_size"]
+ self.excludes = module.params['exclude']
+ self.includes = []
+ self.include_files = self.module.params['include']
+ self.cmd_path = None
+ self.zipinfo_cmd_path = None
+ self._files_in_archive = []
+ self._infodict = dict()
+ self.zipinfoflag = ''
+ self.binaries = (
+ ('unzip', 'cmd_path'),
+ ('zipinfo', 'zipinfo_cmd_path'),
+ )
+
+ def _permstr_to_octal(self, modestr, umask):
+ ''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''
+ revstr = modestr[::-1]
+ mode = 0
+ for j in range(0, 3):
+ for i in range(0, 3):
+ if revstr[i + 3 * j] in ['r', 'w', 'x', 's', 't']:
+ mode += 2 ** (i + 3 * j)
+ # The unzip utility does not support setting the stST bits
+# if revstr[i + 3 * j] in ['s', 't', 'S', 'T' ]:
+# mode += 2 ** (9 + j)
+ return (mode & ~umask)
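+ # Illustrative: _permstr_to_octal('rw-r--r--', 0) returns 0o644, while
+ # _permstr_to_octal('rw-rw-rw-', 0o022) masks out the umask bits and also
+ # returns 0o644.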
+
+ def _legacy_file_list(self):
+ rc, out, err = self.module.run_command([self.cmd_path, '-v', self.src])
+ if rc:
+ raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)
+
+ for line in out.splitlines()[3:-2]:
+ fields = line.split(None, 7)
+ self._files_in_archive.append(fields[7])
+ self._infodict[fields[7]] = int(fields[6])
+
+ def _crc32(self, path):
+ if self._infodict:
+ return self._infodict[path]
+
+ try:
+ archive = ZipFile(self.src)
+ except BadZipfile as e:
+ if e.args[0].lower().startswith('bad magic number'):
+ # Python2.4 can't handle zipfiles with > 64K files. Try using
+ # /usr/bin/unzip instead
+ self._legacy_file_list()
+ else:
+ raise
+ else:
+ try:
+ for item in archive.infolist():
+ self._infodict[item.filename] = int(item.CRC)
+ except Exception:
+ archive.close()
+ raise UnarchiveError('Unable to list files in the archive')
+
+ return self._infodict[path]
+
+ @property
+ def files_in_archive(self):
+ if self._files_in_archive:
+ return self._files_in_archive
+
+ self._files_in_archive = []
+ try:
+ archive = ZipFile(self.src)
+ except BadZipfile as e:
+ if e.args[0].lower().startswith('bad magic number'):
+ # Python2.4 can't handle zipfiles with > 64K files. Try using
+ # /usr/bin/unzip instead
+ self._legacy_file_list()
+ else:
+ raise
+ else:
+ try:
+ for member in archive.namelist():
+ if self.include_files:
+ for include in self.include_files:
+ if fnmatch.fnmatch(member, include):
+ self._files_in_archive.append(to_native(member))
+ else:
+ exclude_flag = False
+ if self.excludes:
+ for exclude in self.excludes:
+ if fnmatch.fnmatch(member, exclude):
+ exclude_flag = True
+ break
+ if not exclude_flag:
+ self._files_in_archive.append(to_native(member))
+ except Exception as e:
+ archive.close()
+ raise UnarchiveError('Unable to list files in the archive: %s' % to_native(e))
+
+ archive.close()
+ return self._files_in_archive
+
+ def is_unarchived(self):
+ # BSD unzip doesn't support zipinfo listings with timestamp.
+ if self.zipinfoflag:
+ cmd = [self.zipinfo_cmd_path, self.zipinfoflag, '-T', '-s', self.src]
+ else:
+ cmd = [self.zipinfo_cmd_path, '-T', '-s', self.src]
+
+ if self.excludes:
+ cmd.extend(['-x', ] + self.excludes)
+ if self.include_files:
+ cmd.extend(self.include_files)
+ rc, out, err = self.module.run_command(cmd)
+
+ old_out = out
+ diff = ''
+ out = ''
+ if rc == 0:
+ unarchived = True
+ else:
+ unarchived = False
+
+ # Get some information related to user/group ownership
+ umask = os.umask(0)
+ os.umask(umask)
+ systemtype = platform.system()
+
+ # Get current user and group information
+ groups = os.getgroups()
+ run_uid = os.getuid()
+ run_gid = os.getgid()
+ try:
+ run_owner = pwd.getpwuid(run_uid).pw_name
+ except (TypeError, KeyError):
+ run_owner = run_uid
+ try:
+ run_group = grp.getgrgid(run_gid).gr_name
+ except (KeyError, ValueError, OverflowError):
+ run_group = run_gid
+
+ # Get future user ownership
+ fut_owner = fut_uid = None
+ if self.file_args['owner']:
+ try:
+ tpw = pwd.getpwnam(self.file_args['owner'])
+ except KeyError:
+ try:
+ tpw = pwd.getpwuid(int(self.file_args['owner']))
+ except (TypeError, KeyError, ValueError):
+ tpw = pwd.getpwuid(run_uid)
+ fut_owner = tpw.pw_name
+ fut_uid = tpw.pw_uid
+ else:
+ try:
+ fut_owner = run_owner
+ except Exception:
+ pass
+ fut_uid = run_uid
+
+ # Get future group ownership
+ fut_group = fut_gid = None
+ if self.file_args['group']:
+ try:
+ tgr = grp.getgrnam(self.file_args['group'])
+ except (ValueError, KeyError):
+ try:
+ # no need to check isdigit() explicitly here, if we fail to
+ # parse, the ValueError will be caught.
+ tgr = grp.getgrgid(int(self.file_args['group']))
+ except (KeyError, ValueError, OverflowError):
+ tgr = grp.getgrgid(run_gid)
+ fut_group = tgr.gr_name
+ fut_gid = tgr.gr_gid
+ else:
+ try:
+ fut_group = run_group
+ except Exception:
+ pass
+ fut_gid = run_gid
+
+ for line in old_out.splitlines():
+ change = False
+
+ pcs = line.split(None, 7)
+ if len(pcs) != 8:
+ # Too few fields... probably a piece of the header or footer
+ continue
+
+ # Check first and seventh field in order to skip header/footer
+ if len(pcs[0]) != 7 and len(pcs[0]) != 10:
+ continue
+ if len(pcs[6]) != 15:
+ continue
+
+ # Possible entries:
+ # -rw-rws--- 1.9 unx 2802 t- defX 11-Aug-91 13:48 perms.2660
+ # -rw-a-- 1.0 hpf 5358 Tl i4:3 4-Dec-91 11:33 longfilename.hpfs
+ # -r--ahs 1.1 fat 4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
+ # --w------- 1.0 mac 17357 bx i8:2 4-May-92 04:02 unzip.macr
+ if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxstah-'):
+ continue
+
+ ztype = pcs[0][0]
+ permstr = pcs[0][1:]
+ version = pcs[1]
+ ostype = pcs[2]
+ size = int(pcs[3])
+ path = to_text(pcs[7], errors='surrogate_or_strict')
+
+ # Skip excluded files
+ if path in self.excludes:
+ out += 'Path %s is excluded on request\n' % path
+ continue
+
+ # Itemized change requires L for symlink
+ if path[-1] == '/':
+ if ztype != 'd':
+ err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
+ ftype = 'd'
+ elif ztype == 'l':
+ ftype = 'L'
+ elif ztype == '-':
+ ftype = 'f'
+ elif ztype == '?':
+ ftype = 'f'
+
+ # Some files may be storing FAT permissions, not Unix permissions
+ # For FAT permissions, we will use a base permissions set of 777 if the item is a directory or has the execute bit set. Otherwise, 666.
+ # This permission will then be modified by the system UMask.
+ # BSD always applies the Umask, even to Unix permissions.
+ # For Unix style permissions on Linux or Mac, we want to use them directly.
+ # So we set the UMask for this file to zero. That permission set will then be unchanged when calling _permstr_to_octal
+
+ if len(permstr) == 6:
+ if path[-1] == '/':
+ permstr = 'rwxrwxrwx'
+ elif permstr == 'rwx---':
+ permstr = 'rwxrwxrwx'
+ else:
+ permstr = 'rw-rw-rw-'
+ file_umask = umask
+ elif 'bsd' in systemtype.lower():
+ file_umask = umask
+ else:
+ file_umask = 0
+
+ # Test string conformity
+ if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
+ raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)
+
+ # DEBUG
+# err += "%s%s %10d %s\n" % (ztype, permstr, size, path)
+
+ b_dest = os.path.join(self.b_dest, to_bytes(path, errors='surrogate_or_strict'))
+ try:
+ st = os.lstat(b_dest)
+ except Exception:
+ change = True
+ self.includes.append(path)
+ err += 'Path %s is missing\n' % path
+ diff += '>%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ # Compare file types
+ if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
+ change = True
+ self.includes.append(path)
+ err += 'File %s already exists, but not as a directory\n' % path
+ diff += 'c%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ if ftype == 'f' and not stat.S_ISREG(st.st_mode):
+ change = True
+ unarchived = False
+ self.includes.append(path)
+ err += 'Directory %s already exists, but not as a regular file\n' % path
+ diff += 'c%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
+ change = True
+ self.includes.append(path)
+ err += 'Directory %s already exists, but not as a symlink\n' % path
+ diff += 'c%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ itemized = list('.%s.......??' % ftype)
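+ # The itemized string loosely mimics rsync's --itemize-changes output:
+ # the code below sets position 2 to 'c' (checksum), 3 to 's' (size),
+ # 4 to 't' (mtime), 5 to 'p' (permissions) and 6 to 'o'/'g' (owner/group)
+ # when the corresponding attribute differs.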
+
+ # Note: this timestamp calculation has a rounding error
+ # somewhere... unzip and this timestamp can be one second off
+ # When that happens, we report a change and re-unzip the file
+ dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
+ timestamp = time.mktime(dt_object.timetuple())
+
+ # Compare file timestamps
+ if stat.S_ISREG(st.st_mode):
+ if self.module.params['keep_newer']:
+ if timestamp > st.st_mtime:
+ change = True
+ self.includes.append(path)
+ err += 'File %s is older, replacing file\n' % path
+ itemized[4] = 't'
+ elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
+ # Add to excluded files, ignore other changes
+ out += 'File %s is newer, excluding file\n' % path
+ self.excludes.append(path)
+ continue
+ else:
+ if timestamp != st.st_mtime:
+ change = True
+ self.includes.append(path)
+ err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
+ itemized[4] = 't'
+
+ # Compare file sizes
+ if stat.S_ISREG(st.st_mode) and size != st.st_size:
+ change = True
+ err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
+ itemized[3] = 's'
+
+ # Compare file checksums
+ if stat.S_ISREG(st.st_mode):
+ crc = crc32(b_dest, self.io_buffer_size)
+ if crc != self._crc32(path):
+ change = True
+ err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
+ itemized[2] = 'c'
+
+ # Compare file permissions
+
+ # Do not handle permissions of symlinks
+ if ftype != 'L':
+
+ # Use the new mode provided with the action, if there is one
+ if self.file_args['mode']:
+ if isinstance(self.file_args['mode'], int):
+ mode = self.file_args['mode']
+ else:
+ try:
+ mode = int(self.file_args['mode'], 8)
+ except Exception as e:
+ try:
+ mode = AnsibleModule._symbolic_mode_to_octal(st, self.file_args['mode'])
+ except ValueError as e:
+ self.module.fail_json(path=path, msg="%s" % to_native(e), exception=traceback.format_exc())
+ # Only special files require no umask-handling
+ elif ztype == '?':
+ mode = self._permstr_to_octal(permstr, 0)
+ else:
+ mode = self._permstr_to_octal(permstr, file_umask)
+
+ if mode != stat.S_IMODE(st.st_mode):
+ change = True
+ itemized[5] = 'p'
+ err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))
+
+ # Compare file user ownership
+ owner = uid = None
+ try:
+ owner = pwd.getpwuid(st.st_uid).pw_name
+ except (TypeError, KeyError):
+ uid = st.st_uid
+
+ # If we are not root and requested owner is not our user, fail
+ if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
+ raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))
+
+ if owner and owner != fut_owner:
+ change = True
+ err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
+ itemized[6] = 'o'
+ elif uid and uid != fut_uid:
+ change = True
+ err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
+ itemized[6] = 'o'
+
+ # Compare file group ownership
+ group = gid = None
+ try:
+ group = grp.getgrgid(st.st_gid).gr_name
+ except (KeyError, ValueError, OverflowError):
+ gid = st.st_gid
+
+ if run_uid != 0 and (fut_group != run_group or fut_gid != run_gid) and fut_gid not in groups:
+ raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))
+
+ if group and group != fut_group:
+ change = True
+ err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
+ itemized[6] = 'g'
+ elif gid and gid != fut_gid:
+ change = True
+ err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
+ itemized[6] = 'g'
+
+ # Register changed files and finalize diff output
+ if change:
+ if path not in self.includes:
+ self.includes.append(path)
+ diff += '%s %s\n' % (''.join(itemized), path)
+
+ if self.includes:
+ unarchived = False
+
+ # DEBUG
+# out = old_out + out
+
+ return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)
+
+ def unarchive(self):
+ cmd = [self.cmd_path, '-o']
+ if self.opts:
+ cmd.extend(self.opts)
+ cmd.append(self.src)
+ # NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments)
+ # if self.includes:
+ # NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped
+ # cmd.extend(map(shell_escape, self.includes))
+ if self.excludes:
+ cmd.extend(['-x'] + self.excludes)
+ if self.include_files:
+ cmd.extend(self.include_files)
+ cmd.extend(['-d', self.b_dest])
+ rc, out, err = self.module.run_command(cmd)
+ return dict(cmd=cmd, rc=rc, out=out, err=err)
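+ # Illustrative resulting command (paths and patterns assumed):
+ #   unzip -o /tmp/foo.zip -x docs/* -d /usr/local/bin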
+
+ def can_handle_archive(self):
+ missing = []
+ for b in self.binaries:
+ try:
+ setattr(self, b[1], get_bin_path(b[0]))
+ except ValueError:
+ missing.append(b[0])
+
+ if missing:
+ return False, "Unable to find required '{missing}' binary in the path.".format(missing="' or '".join(missing))
+
+ cmd = [self.cmd_path, '-l', self.src]
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ return True, None
+ return False, 'Command "%s" could not handle archive: %s' % (self.cmd_path, err)
+
+
+class TgzArchive(object):
+
+ def __init__(self, src, b_dest, file_args, module):
+ self.src = src
+ self.b_dest = b_dest
+ self.file_args = file_args
+ self.opts = module.params['extra_opts']
+ self.module = module
+ if self.module.check_mode:
+ self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
+ self.excludes = [path.rstrip('/') for path in self.module.params['exclude']]
+ self.include_files = self.module.params['include']
+ self.cmd_path = None
+ self.tar_type = None
+ self.zipflag = '-z'
+ self._files_in_archive = []
+
+ def _get_tar_type(self):
+ cmd = [self.cmd_path, '--version']
+ (rc, out, err) = self.module.run_command(cmd)
+ tar_type = None
+ if out.startswith('bsdtar'):
+ tar_type = 'bsd'
+ elif out.startswith('tar') and 'GNU' in out:
+ tar_type = 'gnu'
+ return tar_type
+
+ @property
+ def files_in_archive(self):
+ if self._files_in_archive:
+ return self._files_in_archive
+
+ cmd = [self.cmd_path, '--list', '-C', self.b_dest]
+ if self.zipflag:
+ cmd.append(self.zipflag)
+ if self.opts:
+ cmd.extend(['--show-transformed-names'] + self.opts)
+ if self.excludes:
+ cmd.extend(['--exclude=' + f for f in self.excludes])
+ cmd.extend(['-f', self.src])
+ if self.include_files:
+ cmd.extend(self.include_files)
+
+ locale = get_best_parsable_locale(self.module)
+ rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale))
+ if rc != 0:
+ raise UnarchiveError('Unable to list files in the archive: %s' % err)
+
+ for filename in out.splitlines():
+ # Compensate for locale-related problems in gtar output (octal unicode representation) #11348
+ # filename = filename.decode('string_escape')
+ filename = to_native(codecs.escape_decode(filename)[0])
+
+ # We don't allow absolute filenames. If the user wants to unarchive rooted in "/"
+ # they need to use "dest: '/'". This follows the defaults for gtar, pax, etc.
+ # Allowing absolute filenames here also causes bugs: https://github.com/ansible/ansible/issues/21397
+ if filename.startswith('/'):
+ filename = filename[1:]
+
+ exclude_flag = False
+ if self.excludes:
+ for exclude in self.excludes:
+ if fnmatch.fnmatch(filename, exclude):
+ exclude_flag = True
+ break
+
+ if not exclude_flag:
+ self._files_in_archive.append(to_native(filename))
+
+ return self._files_in_archive
+
+ def is_unarchived(self):
+ cmd = [self.cmd_path, '--diff', '-C', self.b_dest]
+ if self.zipflag:
+ cmd.append(self.zipflag)
+ if self.opts:
+ cmd.extend(['--show-transformed-names'] + self.opts)
+ if self.file_args['owner']:
+ cmd.append('--owner=' + quote(self.file_args['owner']))
+ if self.file_args['group']:
+ cmd.append('--group=' + quote(self.file_args['group']))
+ if self.module.params['keep_newer']:
+ cmd.append('--keep-newer-files')
+ if self.excludes:
+ cmd.extend(['--exclude=' + f for f in self.excludes])
+ cmd.extend(['-f', self.src])
+ if self.include_files:
+ cmd.extend(self.include_files)
+ locale = get_best_parsable_locale(self.module)
+ rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale))
+
+ # Check whether the differences are in something that we're
+ # setting anyway
+
+ # What is different
+ unarchived = True
+ old_out = out
+ out = ''
+ run_uid = os.getuid()
+ # When unarchiving as a user, or when owner/group/mode is supplied, --diff is insufficient
+ # Only way to be sure is to check request with what is on disk (as we do for zip)
+ # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change
+ for line in old_out.splitlines() + err.splitlines():
+ # FIXME: Remove the bogus lines from error-output as well !
+ # Ignore bogus errors on empty filenames (when using --strip-components)
+ if EMPTY_FILE_RE.search(line):
+ continue
+ if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):
+ out += line + '\n'
+ if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):
+ out += line + '\n'
+ if not self.file_args['mode'] and MODE_DIFF_RE.search(line):
+ out += line + '\n'
+ if MOD_TIME_DIFF_RE.search(line):
+ out += line + '\n'
+ if MISSING_FILE_RE.search(line):
+ out += line + '\n'
+ if INVALID_OWNER_RE.search(line):
+ out += line + '\n'
+ if INVALID_GROUP_RE.search(line):
+ out += line + '\n'
+ if out:
+ unarchived = False
+ return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
+
+ def unarchive(self):
+ cmd = [self.cmd_path, '--extract', '-C', self.b_dest]
+ if self.zipflag:
+ cmd.append(self.zipflag)
+ if self.opts:
+ cmd.extend(['--show-transformed-names'] + self.opts)
+ if self.file_args['owner']:
+ cmd.append('--owner=' + quote(self.file_args['owner']))
+ if self.file_args['group']:
+ cmd.append('--group=' + quote(self.file_args['group']))
+ if self.module.params['keep_newer']:
+ cmd.append('--keep-newer-files')
+ if self.excludes:
+ cmd.extend(['--exclude=' + f for f in self.excludes])
+ cmd.extend(['-f', self.src])
+ if self.include_files:
+ cmd.extend(self.include_files)
+ locale = get_best_parsable_locale(self.module)
+ rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LANGUAGE=locale))
+ return dict(cmd=cmd, rc=rc, out=out, err=err)
+
+ def can_handle_archive(self):
+ # Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
+ try:
+ self.cmd_path = get_bin_path('gtar')
+ except ValueError:
+ # Fallback to tar
+ try:
+ self.cmd_path = get_bin_path('tar')
+ except ValueError:
+ return False, "Unable to find required 'gtar' or 'tar' binary in the path"
+
+ self.tar_type = self._get_tar_type()
+
+ if self.tar_type != 'gnu':
+ return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)
+
+ try:
+ if self.files_in_archive:
+ return True, None
+ except UnarchiveError as e:
+ return False, 'Command "%s" could not handle archive: %s' % (self.cmd_path, to_native(e))
+ # Errors, or no files in the archive, mean that we weren't able to
+ # unarchive it properly
+ return False, 'Command "%s" found no files in archive. Empty archive files are not supported.' % self.cmd_path
+
+
+# Class to handle tar files that aren't compressed
+class TarArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarArchive, self).__init__(src, b_dest, file_args, module)
+ # argument to tar
+ self.zipflag = ''
+
+
+# Class to handle bzip2 compressed tar files
+class TarBzipArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarBzipArchive, self).__init__(src, b_dest, file_args, module)
+ self.zipflag = '-j'
+
+
+# Class to handle xz compressed tar files
+class TarXzArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarXzArchive, self).__init__(src, b_dest, file_args, module)
+ self.zipflag = '-J'
+
+
+# Class to handle zstd compressed tar files
+class TarZstdArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarZstdArchive, self).__init__(src, b_dest, file_args, module)
+ # GNU Tar supports the --use-compress-program option to
+ # specify which executable to use for
+ # compression/decompression.
+ #
+ # Note: some flavors of BSD tar support --zstd (e.g., FreeBSD
+ # 12.2), but the TgzArchive class only supports GNU Tar.
+ self.zipflag = '--use-compress-program=zstd'
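+ # Illustrative resulting extraction command (paths assumed):
+ #   gtar --extract -C /var/lib/foo --use-compress-program=zstd -f /tmp/foo.tar.zst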
+
+
+class ZipZArchive(ZipArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(ZipZArchive, self).__init__(src, b_dest, file_args, module)
+ self.zipinfoflag = '-Z'
+ self.binaries = (
+ ('unzip', 'cmd_path'),
+ ('unzip', 'zipinfo_cmd_path'),
+ )
+
+ def can_handle_archive(self):
+ unzip_available, error_msg = super(ZipZArchive, self).can_handle_archive()
+
+ if not unzip_available:
+ return unzip_available, error_msg
+
+ # Ensure unzip -Z is available before we use it in is_unarchived
+ cmd = [self.zipinfo_cmd_path, self.zipinfoflag]
+ rc, out, err = self.module.run_command(cmd)
+ if 'zipinfo' in out.lower():
+ return True, None
+ return False, 'Command "unzip -Z" could not handle archive: %s' % err
+
+
+# try handlers in order and return the one that works or bail if none work
+def pick_handler(src, dest, file_args, module):
+ handlers = [ZipArchive, ZipZArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive, TarZstdArchive]
+ reasons = set()
+ for handler in handlers:
+ obj = handler(src, dest, file_args, module)
+ (can_handle, reason) = obj.can_handle_archive()
+ if can_handle:
+ return obj
+ reasons.add(reason)
+ reason_msg = '\n'.join(reasons)
+ module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed.\n%s' % (src, reason_msg))
+
+
+def main():
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ src=dict(type='path', required=True),
+ dest=dict(type='path', required=True),
+ remote_src=dict(type='bool', default=False),
+ creates=dict(type='path'),
+ list_files=dict(type='bool', default=False),
+ keep_newer=dict(type='bool', default=False),
+ exclude=dict(type='list', elements='str', default=[]),
+ include=dict(type='list', elements='str', default=[]),
+ extra_opts=dict(type='list', elements='str', default=[]),
+ validate_certs=dict(type='bool', default=True),
+ io_buffer_size=dict(type='int', default=64 * 1024),
+
+ # Options that are for the action plugin, but ignored by the module itself.
+ # We have them here so that the sanity tests pass without ignores, which
+ # reduces the likelihood of further bugs being added.
+ copy=dict(type='bool', default=True),
+ decrypt=dict(type='bool', default=True),
+ ),
+ add_file_common_args=True,
+ # check-mode only works for zip files, we cover that later
+ supports_check_mode=True,
+ mutually_exclusive=[('include', 'exclude')],
+ )
+
+ src = module.params['src']
+ dest = module.params['dest']
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ remote_src = module.params['remote_src']
+ file_args = module.load_file_common_arguments(module.params)
+
+ # did tar file arrive?
+ if not os.path.exists(src):
+ if not remote_src:
+ module.fail_json(msg="Source '%s' failed to transfer" % src)
+ # If remote_src=true and src contains ://, try to download the file to a temp directory.
+ elif '://' in src:
+ src = fetch_file(module, src)
+ else:
+ module.fail_json(msg="Source '%s' does not exist" % src)
+ if not os.access(src, os.R_OK):
+ module.fail_json(msg="Source '%s' not readable" % src)
+
+ # skip working with 0 size archives
+ try:
+ if os.path.getsize(src) == 0:
+ module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
+ except Exception as e:
+ module.fail_json(msg="Source '%s' not readable, %s" % (src, to_native(e)))
+
+ # is dest OK to receive tar file?
+ if not os.path.isdir(b_dest):
+ module.fail_json(msg="Destination '%s' is not a directory" % dest)
+
+ handler = pick_handler(src, b_dest, file_args, module)
+
+ res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
+
+ # do we need to unpack?
+ check_results = handler.is_unarchived()
+
+ # DEBUG
+ # res_args['check_results'] = check_results
+
+ if module.check_mode:
+ res_args['changed'] = not check_results['unarchived']
+ elif check_results['unarchived']:
+ res_args['changed'] = False
+ else:
+ # do the unpack
+ try:
+ res_args['extract_results'] = handler.unarchive()
+ if res_args['extract_results']['rc'] != 0:
+ module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
+ except IOError:
+ module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
+ else:
+ res_args['changed'] = True
+
+ # Get diff if required
+ if check_results.get('diff', False):
+ res_args['diff'] = {'prepared': check_results['diff']}
+
+ # Run only if we found differences (idempotence) or diff was missing
+ if res_args.get('diff', True) and not module.check_mode:
+ # do we need to change perms?
+ top_folders = []
+ for filename in handler.files_in_archive:
+ file_args['path'] = os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict'))
+
+ try:
+ res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
+
+ if '/' in filename:
+ top_folder_path = filename.split('/')[0]
+ if top_folder_path not in top_folders:
+ top_folders.append(top_folder_path)
+
+ # make sure top folders have the right permissions
+ # https://github.com/ansible/ansible/issues/35426
+ if top_folders:
+ for f in top_folders:
+ file_args['path'] = "%s/%s" % (dest, f)
+ try:
+ res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
+
+ if module.params['list_files']:
+ res_args['files'] = handler.files_in_archive
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/uri.py b/lib/ansible/modules/uri.py
new file mode 100644
index 0000000..f68b86a
--- /dev/null
+++ b/lib/ansible/modules/uri.py
@@ -0,0 +1,779 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Romeo Theriault <romeot () hawaii.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: uri
+short_description: Interacts with webservices
+description:
+ - Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE
+ HTTP authentication mechanisms.
+ - For Windows targets, use the M(ansible.windows.win_uri) module instead.
+version_added: "1.1"
+options:
+ ciphers:
+ description:
+ - SSL/TLS Ciphers to use for the request.
+ - 'When a list is provided, all ciphers are joined in order with C(:)'
+ - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
+ for more details.
+ - The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions.
+ type: list
+ elements: str
+ version_added: '2.14'
+ decompress:
+ description:
+ - Whether to attempt to decompress gzip content-encoded responses.
+ type: bool
+ default: true
+ version_added: '2.14'
+ url:
+ description:
+ - HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path
+ type: str
+ required: true
+ dest:
+ description:
+ - A path to which to download the file (if desired). If I(dest) is a
+ directory, the basename of the file on the remote server will be used.
+ type: path
+ url_username:
+ description:
+ - A username for the module to use for Digest, Basic or WSSE authentication.
+ type: str
+ aliases: [ user ]
+ url_password:
+ description:
+ - A password for the module to use for Digest, Basic or WSSE authentication.
+ type: str
+ aliases: [ password ]
+ body:
+ description:
+ - The body of the HTTP request/response to the web service. If C(body_format) is set
+ to 'json' it will take an already formatted JSON string or convert a data structure
+ into JSON.
+ - If C(body_format) is set to 'form-urlencoded' it will convert a dictionary
+ or list of tuples into an 'application/x-www-form-urlencoded' string. (Added in v2.7)
+ - If C(body_format) is set to 'form-multipart' it will convert a dictionary
+ into 'multipart/form-multipart' body. (Added in v2.10)
+ type: raw
+ body_format:
+ description:
+ - The serialization format of the body. When set to C(json), C(form-multipart), or C(form-urlencoded), encodes
+ the body argument, if needed, and automatically sets the Content-Type header accordingly.
+ - As of v2.3 it is possible to override the C(Content-Type) header, when
+ set to C(json) or C(form-urlencoded) via the I(headers) option.
+ - The C(Content-Type) header cannot be overridden when using C(form-multipart).
+ - C(form-urlencoded) was added in v2.7.
+ - C(form-multipart) was added in v2.10.
+ type: str
+ choices: [ form-urlencoded, json, raw, form-multipart ]
+ default: raw
+ version_added: "2.0"
+ method:
+ description:
+ - The HTTP method of the request or response.
+ - In more recent versions we do not restrict the method at the module level anymore
+ but it still must be a valid method accepted by the service handling the request.
+ type: str
+ default: GET
+ return_content:
+ description:
+ - Whether or not to return the body of the response as a "content" key in
+ the dictionary result, no matter whether the request succeeded or failed.
+ - Independently of this option, if the reported Content-type is "application/json", then the JSON is
+ always loaded into a key called C(json) in the dictionary results.
+ type: bool
+ default: no
+ force_basic_auth:
+ description:
+ - Force the sending of the Basic authentication header upon initial request.
+ - The library used by the uri module only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail.
+ type: bool
+ default: no
+ follow_redirects:
+ description:
+ - Whether or not the URI module should follow redirects. C(all) will follow all redirects.
+ C(safe) will follow only "safe" redirects, where "safe" means that the client is only
+ doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow
+ any redirects. Note that C(true) and C(false) choices are accepted for backwards compatibility,
+ where C(true) is the equivalent of C(all) and C(false) is the equivalent of C(safe). C(true) and C(false)
+ are deprecated and will be removed in some future version of Ansible.
+ type: str
+ choices: ['all', 'no', 'none', 'safe', 'urllib2', 'yes']
+ default: safe
+ creates:
+ description:
+ - A filename; if it already exists, this step will not be run.
+ type: path
+ removes:
+ description:
+ - A filename; if it does not exist, this step will not be run.
+ type: path
+ status_code:
+ description:
+ - A list of valid, numeric HTTP status codes that signify success of the request.
+ type: list
+ elements: int
+ default: [ 200 ]
+ timeout:
+ description:
+ - The socket level timeout in seconds.
+ type: int
+ default: 30
+ headers:
+ description:
+ - Add custom HTTP headers to a request in the format of a YAML hash. As
+ of C(2.3) supplying C(Content-Type) here will override the header
+ generated by supplying C(json) or C(form-urlencoded) for I(body_format).
+ type: dict
+ version_added: '2.1'
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated.
+ - This should only be set to C(false) when used on personally controlled sites using self-signed certificates.
+ - Prior to 1.9.2 the code defaulted to C(false).
+ type: bool
+ default: true
+ version_added: '1.9.2'
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key; if the key is included, I(client_key) is not required.
+ type: path
+ version_added: '2.4'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If I(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '2.4'
+ ca_path:
+ description:
+ - PEM formatted file that contains a CA certificate to be used for validation.
+ type: path
+ version_added: '2.11'
+ src:
+ description:
+ - Path to file to be submitted to the remote server.
+ - Cannot be used with I(body).
+ - Should be used with I(force_basic_auth) to ensure success when the remote end sends a 401.
+ type: path
+ version_added: '2.7'
+ remote_src:
+ description:
+ - If C(false), the module will search for the C(src) on the controller node.
+ - If C(true), the module will search for the C(src) on the managed (remote) node.
+ type: bool
+ default: no
+ version_added: '2.7'
+ force:
+ description:
+ - If C(true) do not get a cached copy.
+ type: bool
+ default: no
+ use_proxy:
+ description:
+ - If C(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
+ type: bool
+ default: true
+ unix_socket:
+ description:
+ - Path to Unix domain socket to use for connection.
+ type: path
+ version_added: '2.8'
+ http_agent:
+ description:
+ - Header to identify as, generally appears in web server logs.
+ type: str
+ default: ansible-httpget
+ unredirected_headers:
+ description:
+ - A list of header names that will not be sent on subsequent redirected requests. This list is case
+ insensitive. By default all headers will be sent on redirected requests. In some cases it may be beneficial to list
+ headers such as C(Authorization) here to avoid potential credential exposure.
+ default: []
+ type: list
+ elements: str
+ version_added: '2.12'
+ use_gssapi:
+ description:
+ - Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
+ authentication.
+ - Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
+ - Credentials for GSSAPI can be specified with I(url_username)/I(url_password) or with the GSSAPI env var
+ C(KRB5CCNAME) that specifies a custom Kerberos credential cache.
+ - NTLM authentication is C(not) supported even if the GSSAPI mech for NTLM has been installed.
+ type: bool
+ default: no
+ version_added: '2.11'
+ use_netrc:
+ description:
+ - Determines whether to use credentials from the ``~/.netrc`` file.
+ - By default .netrc is used with Basic authentication headers.
+ - When set to C(false), .netrc credentials are ignored.
+ type: bool
+ default: true
+ version_added: '2.14'
+extends_documentation_fragment:
+ - action_common_attributes
+ - files
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - The dependency on httplib2 was removed in Ansible 2.1.
+ - The module returns all the HTTP headers in lower-case.
+ - For Windows targets, use the M(ansible.windows.win_uri) module instead.
+seealso:
+- module: ansible.builtin.get_url
+- module: ansible.windows.win_uri
+author:
+- Romeo Theriault (@romeotheriault)
+'''
+
+EXAMPLES = r'''
+- name: Check that you can connect (GET) to a page and it returns a status 200
+ ansible.builtin.uri:
+ url: http://www.example.com
+
+- name: Check that a page returns a status 200 and fail if the word AWESOME is not in the page contents
+ ansible.builtin.uri:
+ url: http://www.example.com
+ return_content: true
+ register: this
+ failed_when: "'AWESOME' not in this.content"
+
+- name: Create a JIRA issue
+ ansible.builtin.uri:
+ url: https://your.jira.example.com/rest/api/2/issue/
+ user: your_username
+ password: your_pass
+ method: POST
+ body: "{{ lookup('ansible.builtin.file','issue.json') }}"
+ force_basic_auth: true
+ status_code: 201
+ body_format: json
+
+- name: Login to a form based webpage, then use the returned cookie to access the app in later tasks
+ ansible.builtin.uri:
+ url: https://your.form.based.auth.example.com/index.php
+ method: POST
+ body_format: form-urlencoded
+ body:
+ name: your_username
+ password: your_password
+ enter: Sign in
+ status_code: 302
+ register: login
+
+- name: Login to a form based webpage using a list of tuples
+ ansible.builtin.uri:
+ url: https://your.form.based.auth.example.com/index.php
+ method: POST
+ body_format: form-urlencoded
+ body:
+ - [ name, your_username ]
+ - [ password, your_password ]
+ - [ enter, Sign in ]
+ status_code: 302
+ register: login
+
+- name: Upload a file via multipart/form-multipart
+ ansible.builtin.uri:
+ url: https://httpbin.org/post
+ method: POST
+ body_format: form-multipart
+ body:
+ file1:
+ filename: /bin/true
+ mime_type: application/octet-stream
+ file2:
+ content: text based file content
+ filename: fake.txt
+ mime_type: text/plain
+ text_form_field: value
+
+- name: Connect to website using a previously stored cookie
+ ansible.builtin.uri:
+ url: https://your.form.based.auth.example.com/dashboard.php
+ method: GET
+ return_content: true
+ headers:
+ Cookie: "{{ login.cookies_string }}"
+
+- name: Queue build of a project in Jenkins
+ ansible.builtin.uri:
+ url: http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}
+ user: "{{ jenkins.user }}"
+ password: "{{ jenkins.password }}"
+ method: GET
+ force_basic_auth: true
+ status_code: 201
+
+- name: POST from contents of local file
+ ansible.builtin.uri:
+ url: https://httpbin.org/post
+ method: POST
+ src: file.json
+
+- name: POST from contents of remote file
+ ansible.builtin.uri:
+ url: https://httpbin.org/post
+ method: POST
+ src: /path/to/my/file.json
+ remote_src: true
+
+- name: Create workspaces in Log analytics Azure
+ ansible.builtin.uri:
+ url: https://www.mms.microsoft.com/Embedded/Api/ConfigDataSources/LogManagementData/Save
+ method: POST
+ body_format: json
+ status_code: [200, 202]
+ return_content: true
+ headers:
+ Content-Type: application/json
+ x-ms-client-workspace-path: /subscriptions/{{ sub_id }}/resourcegroups/{{ res_group }}/providers/microsoft.operationalinsights/workspaces/{{ w_spaces }}
+ x-ms-client-platform: ibiza
+ x-ms-client-auth-token: "{{ token_az }}"
+ body:
+
+- name: Pause play until a URL is reachable from this host
+ ansible.builtin.uri:
+ url: "http://192.0.2.1/some/test"
+ follow_redirects: none
+ method: GET
+ register: _result
+ until: _result.status == 200
+ retries: 720 # 720 * 5 seconds = 1hour (60*60/5)
+ delay: 5 # Every 5 seconds
+
+- name: Provide SSL/TLS ciphers as a list
+ uri:
+ url: https://example.org
+ ciphers:
+ - '@SECLEVEL=2'
+ - ECDH+AESGCM
+ - ECDH+CHACHA20
+ - ECDH+AES
+ - DHE+AES
+ - '!aNULL'
+ - '!eNULL'
+ - '!aDSS'
+ - '!SHA1'
+ - '!AESCCM'
+
+- name: Provide SSL/TLS ciphers as an OpenSSL formatted cipher list
+ uri:
+ url: https://example.org
+ ciphers: '@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM'
+'''
+
+RETURN = r'''
+# The return information includes all the HTTP headers in lower-case.
+content:
+ description: The response body content.
+ returned: status not in status_code or return_content is true
+ type: str
+ sample: "{}"
+cookies:
+ description: The cookie values placed in cookie jar.
+ returned: on success
+ type: dict
+ sample: {"SESSIONID": "[SESSIONID]"}
+ version_added: "2.4"
+cookies_string:
+ description: The value for future request Cookie headers.
+ returned: on success
+ type: str
+ sample: "SESSIONID=[SESSIONID]"
+ version_added: "2.6"
+elapsed:
+ description: The number of seconds that elapsed while performing the download.
+ returned: on success
+ type: int
+ sample: 23
+msg:
+ description: The HTTP message from the request.
+ returned: always
+ type: str
+ sample: OK (unknown bytes)
+path:
+ description: Destination file/path.
+ returned: dest is defined
+ type: str
+ sample: /path/to/file.txt
+redirected:
+ description: Whether the request was redirected.
+ returned: on success
+ type: bool
+ sample: false
+status:
+ description: The HTTP status code from the request.
+ returned: always
+ type: int
+ sample: 200
+url:
+ description: The actual URL used for the request.
+ returned: always
+ type: str
+ sample: https://www.ansible.com/
+'''
+
+import datetime
+import json
+import os
+import re
+import shutil
+import sys
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule, sanitize_keys
+from ansible.module_utils.six import PY2, PY3, binary_type, iteritems, string_types
+from ansible.module_utils.six.moves.urllib.parse import urlencode, urlsplit
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.urls import fetch_url, get_response_filename, parse_content_type, prepare_multipart, url_argument_spec
+
+JSON_CANDIDATES = {'json', 'javascript'}
+
+# List of response key names we do not want sanitize_keys() to change.
+NO_MODIFY_KEYS = frozenset(
+ ('msg', 'exception', 'warnings', 'deprecations', 'failed', 'skipped',
+ 'changed', 'rc', 'stdout', 'stderr', 'elapsed', 'path', 'location',
+ 'content_type')
+)
+
+
+def format_message(err, resp):
+ msg = resp.pop('msg')
+ return err + (' %s' % msg if msg else '')
+
+
+def write_file(module, dest, content, resp):
+ """
+ Create temp file and write content to dest file only if content changed
+ """
+
+ tmpsrc = None
+
+ try:
+ fd, tmpsrc = tempfile.mkstemp(dir=module.tmpdir)
+ with os.fdopen(fd, 'wb') as f:
+ if isinstance(content, binary_type):
+ f.write(content)
+ else:
+ shutil.copyfileobj(content, f)
+ except Exception as e:
+ if tmpsrc and os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ msg = format_message("Failed to create temporary content file: %s" % to_native(e), resp)
+ module.fail_json(msg=msg, **resp)
+
+ checksum_src = module.sha1(tmpsrc)
+ checksum_dest = module.sha1(dest)
+
+ if checksum_src != checksum_dest:
+ try:
+ module.atomic_move(tmpsrc, dest)
+ except Exception as e:
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ msg = format_message("failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)), resp)
+ module.fail_json(msg=msg, **resp)
+
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+
+
+def absolute_location(url, location):
+ """Attempts to create an absolute URL based on initial URL, and
+ next URL, specifically in the case of a ``Location`` header.
+ """
+
+ if '://' in location:
+ return location
+
+ elif location.startswith('/'):
+ parts = urlsplit(url)
+ base = url.replace(parts[2], '')
+ return '%s%s' % (base, location)
+
+ elif not location.startswith('/'):
+ base = os.path.dirname(url)
+ return '%s/%s' % (base, location)
+
+ else:
+ return location
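+# Illustrative behavior (URLs assumed):
+#   absolute_location('https://host/a/b', 'https://other/x') -> 'https://other/x'
+#   absolute_location('https://host/a/b', '/c') -> 'https://host/c'
+#   absolute_location('https://host/a/b', 'c') -> 'https://host/a/c'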
+
+
+def kv_list(data):
+ ''' Convert data into a list of key-value tuples '''
+ if data is None:
+ return None
+
+ if isinstance(data, Sequence):
+ return list(data)
+
+ if isinstance(data, Mapping):
+ return list(data.items())
+
+ raise TypeError('cannot form-urlencode body, expect list or dict')
+
+
+def form_urlencoded(body):
+ ''' Convert data into a form-urlencoded string '''
+ if isinstance(body, string_types):
+ return body
+
+ if isinstance(body, (Mapping, Sequence)):
+ result = []
+ # Turn a list of lists into a list of tuples that urlencode accepts
+ for key, values in kv_list(body):
+ if isinstance(values, string_types) or not isinstance(values, (Mapping, Sequence)):
+ values = [values]
+ for value in values:
+ if value is not None:
+ result.append((to_text(key), to_text(value)))
+ return urlencode(result, doseq=True)
+
+ return body
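+# Illustrative: form_urlencoded({'a': 1, 'b': [2, 3]}) returns 'a=1&b=2&b=3';
+# keys with list values are repeated, and None values are dropped.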
+
+
+def uri(module, url, dest, body, body_format, method, headers, socket_timeout, ca_path, unredirected_headers, decompress,
+ ciphers, use_netrc):
+ # if dest is set and is a directory, let's check if we get redirected and
+ # set the filename from that url
+
+ src = module.params['src']
+ if src:
+ try:
+ headers.update({
+ 'Content-Length': os.stat(src).st_size
+ })
+ data = open(src, 'rb')
+ except OSError:
+ module.fail_json(msg='Unable to open source file %s' % src, elapsed=0)
+ else:
+ data = body
+
+ kwargs = {}
+ if dest is not None and os.path.isfile(dest):
+ # if the destination file already exists, only download if the file is newer
+ kwargs['last_mod_time'] = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest))
+
+ resp, info = fetch_url(module, url, data=data, headers=headers,
+ method=method, timeout=socket_timeout, unix_socket=module.params['unix_socket'],
+ ca_path=ca_path, unredirected_headers=unredirected_headers,
+ use_proxy=module.params['use_proxy'], decompress=decompress,
+ ciphers=ciphers, use_netrc=use_netrc, **kwargs)
+
+ if src:
+ # Try to close the open file handle
+ try:
+ data.close()
+ except Exception:
+ pass
+
+ return resp, info
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ dest=dict(type='path'),
+ url_username=dict(type='str', aliases=['user']),
+ url_password=dict(type='str', aliases=['password'], no_log=True),
+ body=dict(type='raw'),
+ body_format=dict(type='str', default='raw', choices=['form-urlencoded', 'json', 'raw', 'form-multipart']),
+ src=dict(type='path'),
+ method=dict(type='str', default='GET'),
+ return_content=dict(type='bool', default=False),
+ follow_redirects=dict(type='str', default='safe', choices=['all', 'no', 'none', 'safe', 'urllib2', 'yes']),
+ creates=dict(type='path'),
+ removes=dict(type='path'),
+ status_code=dict(type='list', elements='int', default=[200]),
+ timeout=dict(type='int', default=30),
+ headers=dict(type='dict', default={}),
+ unix_socket=dict(type='path'),
+ remote_src=dict(type='bool', default=False),
+ ca_path=dict(type='path', default=None),
+ unredirected_headers=dict(type='list', elements='str', default=[]),
+ decompress=dict(type='bool', default=True),
+ ciphers=dict(type='list', elements='str'),
+ use_netrc=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ mutually_exclusive=[['body', 'src']],
+ )
+
+ url = module.params['url']
+ body = module.params['body']
+ body_format = module.params['body_format'].lower()
+ method = module.params['method'].upper()
+ dest = module.params['dest']
+ return_content = module.params['return_content']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ status_code = [int(x) for x in list(module.params['status_code'])]
+ socket_timeout = module.params['timeout']
+ ca_path = module.params['ca_path']
+ dict_headers = module.params['headers']
+ unredirected_headers = module.params['unredirected_headers']
+ decompress = module.params['decompress']
+ ciphers = module.params['ciphers']
+ use_netrc = module.params['use_netrc']
+
+ if not re.match('^[A-Z]+$', method):
+ module.fail_json(msg="Parameter 'method' needs to be a single word in uppercase, like GET or POST.")
+
+ if body_format == 'json':
+ # Encode the body unless it's a string, then assume it is pre-formatted JSON
+ if not isinstance(body, string_types):
+ body = json.dumps(body)
+ if 'content-type' not in [header.lower() for header in dict_headers]:
+ dict_headers['Content-Type'] = 'application/json'
+ elif body_format == 'form-urlencoded':
+ if not isinstance(body, string_types):
+ try:
+ body = form_urlencoded(body)
+ except ValueError as e:
+ module.fail_json(msg='failed to parse body as form_urlencoded: %s' % to_native(e), elapsed=0)
+ if 'content-type' not in [header.lower() for header in dict_headers]:
+ dict_headers['Content-Type'] = 'application/x-www-form-urlencoded'
+ elif body_format == 'form-multipart':
+ try:
+ content_type, body = prepare_multipart(body)
+ except (TypeError, ValueError) as e:
+ module.fail_json(msg='failed to parse body as form-multipart: %s' % to_native(e))
+ dict_headers['Content-Type'] = content_type
+
+ if creates is not None:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of uri executions.
+ if os.path.exists(creates):
+ module.exit_json(stdout="skipped, since '%s' exists" % creates, changed=False)
+
+ if removes is not None:
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of uri executions.
+ if not os.path.exists(removes):
+ module.exit_json(stdout="skipped, since '%s' does not exist" % removes, changed=False)
+
+ # Make the request
+ start = datetime.datetime.utcnow()
+ r, info = uri(module, url, dest, body, body_format, method,
+ dict_headers, socket_timeout, ca_path, unredirected_headers,
+ decompress, ciphers, use_netrc)
+
+ elapsed = (datetime.datetime.utcnow() - start).seconds
+
+ if r and dest is not None and os.path.isdir(dest):
+ filename = get_response_filename(r) or 'index.html'
+ dest = os.path.join(dest, filename)
+
+ if r and r.fp is not None:
+ # r may be None for some errors
+ # r.fp may be None depending on the error, which means there are no headers either
+ content_type, main_type, sub_type, content_encoding = parse_content_type(r)
+ else:
+ content_type = 'application/octet-stream'
+ main_type = 'application'
+ sub_type = 'octet-stream'
+ content_encoding = 'utf-8'
+
+ maybe_json = content_type and sub_type.lower() in JSON_CANDIDATES
+ maybe_output = maybe_json or return_content or info['status'] not in status_code
+
+ if maybe_output:
+ try:
+ if PY3 and (r.fp is None or r.closed):
+ raise TypeError
+ content = r.read()
+ except (AttributeError, TypeError):
+ # there was no content, but the body read during error handling
+ # may have been stored in the info dict as 'body'
+ content = info.pop('body', b'')
+ elif r:
+ content = r
+ else:
+ content = None
+
+ resp = {}
+ resp['redirected'] = info['url'] != url
+ resp.update(info)
+
+ resp['elapsed'] = elapsed
+ resp['status'] = int(resp['status'])
+ resp['changed'] = False
+
+ # Write the file out if requested
+ if r and dest is not None:
+ if resp['status'] in status_code and resp['status'] != 304:
+ write_file(module, dest, content, resp)
+ # allow file attribute changes
+ resp['changed'] = True
+ module.params['path'] = dest
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ resp['changed'] = module.set_fs_attributes_if_different(file_args, resp['changed'])
+ resp['path'] = dest
+
+ # Transmogrify the headers, replacing '-' with '_', since variables don't
+ # work with dashes.
+ # In python3, the headers are title cased. Lowercase them to be
+ # compatible with the python2 behaviour.
+ uresp = {}
+ for key, value in iteritems(resp):
+ ukey = key.replace("-", "_").lower()
+ uresp[ukey] = value
+
+ if 'location' in uresp:
+ uresp['location'] = absolute_location(url, uresp['location'])
+
+ # Default content_encoding to try
+ if isinstance(content, binary_type):
+ u_content = to_text(content, encoding=content_encoding)
+ if maybe_json:
+ try:
+ js = json.loads(u_content)
+ uresp['json'] = js
+ except Exception:
+ if PY2:
+ sys.exc_clear() # Avoid false positive traceback in fail_json() on Python 2
+ else:
+ u_content = None
+
+ if module.no_log_values:
+ uresp = sanitize_keys(uresp, module.no_log_values, NO_MODIFY_KEYS)
+
+ if resp['status'] not in status_code:
+ uresp['msg'] = 'Status code was %s and not %s: %s' % (resp['status'], status_code, uresp.get('msg', ''))
+ if return_content:
+ module.fail_json(content=u_content, **uresp)
+ else:
+ module.fail_json(**uresp)
+ elif return_content:
+ module.exit_json(content=u_content, **uresp)
+ else:
+ module.exit_json(**uresp)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/user.py b/lib/ansible/modules/user.py
new file mode 100644
index 0000000..2fc4e47
--- /dev/null
+++ b/lib/ansible/modules/user.py
@@ -0,0 +1,3253 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: user
+version_added: "0.2"
+short_description: Manage user accounts
+description:
+ - Manage user accounts and user attributes.
+ - For Windows targets, use the M(ansible.windows.win_user) module instead.
+options:
+ name:
+ description:
+ - Name of the user to create, remove or modify.
+ type: str
+ required: true
+ aliases: [ user ]
+ uid:
+ description:
+ - Optionally sets the I(UID) of the user.
+ type: int
+ comment:
+ description:
+ - Optionally sets the description (aka I(GECOS)) of the user account.
+ type: str
+ hidden:
+ description:
+ - macOS only, optionally hide the user from the login window and system preferences.
+ - The default will be C(true) if the I(system) option is used.
+ type: bool
+ version_added: "2.6"
+ non_unique:
+ description:
+ - Optionally when used with the -u option, this option allows changing the user ID to a non-unique value.
+ type: bool
+ default: no
+ version_added: "1.1"
+ seuser:
+ description:
+ - Optionally sets the seuser type (user_u) on SELinux-enabled systems.
+ type: str
+ version_added: "2.1"
+ group:
+ description:
+ - Optionally sets the user's primary group (takes a group name).
+ type: str
+ groups:
+ description:
+ - List of groups user will be added to.
+ - By default, the user is removed from all other groups. Configure C(append) to modify this.
+ - When set to an empty string C(''),
+ the user is removed from all groups except the primary group.
+ - Before Ansible 2.3, the only input format allowed was a comma separated string.
+ type: list
+ elements: str
+ append:
+ description:
+ - If C(true), add the user to the groups specified in C(groups).
+ - If C(false), user will only be added to the groups specified in C(groups),
+ removing them from all other groups.
+ type: bool
+ default: no
+ shell:
+ description:
+ - Optionally set the user's shell.
+ - On macOS, before Ansible 2.5, the default shell for non-system users was C(/usr/bin/false).
+ Since Ansible 2.5, the default shell for non-system users on macOS is C(/bin/bash).
+ - See notes for details on how other operating systems determine the default shell by
+ the underlying tool.
+ type: str
+ home:
+ description:
+ - Optionally set the user's home directory.
+ type: path
+ skeleton:
+ description:
+ - Optionally set a home skeleton directory.
+ - Requires C(create_home) option!
+ type: str
+ version_added: "2.0"
+ password:
+ description:
+ - If provided, set the user's password to the provided encrypted hash (Linux) or plain text password (macOS).
+ - B(Linux/Unix/POSIX:) Enter the hashed password as the value.
+ - See L(FAQ entry,https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#how-do-i-generate-encrypted-passwords-for-the-user-module)
+ for details on various ways to generate the hash of a password.
+ - To create an account with a locked/disabled password on Linux systems, set this to C('!') or C('*').
+ - To create an account with a locked/disabled password on OpenBSD, set this to C('*************').
+ - B(OS X/macOS:) Enter the cleartext password as the value. Be sure to take relevant security precautions.
+ type: str
+ state:
+ description:
+ - Whether the account should exist or not, taking action if the state is different from what is stated.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ create_home:
+ description:
+ - Unless set to C(false), a home directory will be made for the user
+ when the account is created or if the home directory does not exist.
+ - Changed from C(createhome) to C(create_home) in Ansible 2.5.
+ type: bool
+ default: yes
+ aliases: [ createhome ]
+ move_home:
+ description:
+ - "If set to C(true) when used with C(home: ), attempt to move the user's old home
+ directory to the specified directory if it isn't there already and the old home exists."
+ type: bool
+ default: no
+ system:
+ description:
+ - When creating an account C(state=present), setting this to C(true) makes the user a system account.
+ - This setting cannot be changed on existing users.
+ type: bool
+ default: no
+ force:
+ description:
+ - This only affects C(state=absent); it forces removal of the user and associated directories on supported platforms.
+ - The behavior is the same as C(userdel --force); check the man page for C(userdel) on your system for details and support.
+ - When used with C(generate_ssh_key=yes) this forces an existing key to be overwritten.
+ type: bool
+ default: no
+ remove:
+ description:
+ - This only affects C(state=absent); it attempts to remove directories associated with the user.
+ - The behavior is the same as C(userdel --remove); check the man page for details and support.
+ type: bool
+ default: no
+ login_class:
+ description:
+ - Optionally sets the user's login class, a feature of most BSD OSs.
+ type: str
+ generate_ssh_key:
+ description:
+ - Whether to generate an SSH key for the user in question.
+ - This will B(not) overwrite an existing SSH key unless used with C(force=yes).
+ type: bool
+ default: no
+ version_added: "0.9"
+ ssh_key_bits:
+ description:
+ - Optionally specify the number of bits in the SSH key to create.
+ - The default value depends on ssh-keygen.
+ type: int
+ version_added: "0.9"
+ ssh_key_type:
+ description:
+ - Optionally specify the type of SSH key to generate.
+ - Available SSH key types depend on the implementation
+ present on the target host.
+ type: str
+ default: rsa
+ version_added: "0.9"
+ ssh_key_file:
+ description:
+ - Optionally specify the SSH key filename.
+ - If this is a relative filename then it will be relative to the user's home directory.
+ - This parameter defaults to I(.ssh/id_rsa).
+ type: path
+ version_added: "0.9"
+ ssh_key_comment:
+ description:
+ - Optionally define the comment for the SSH key.
+ type: str
+ default: ansible-generated on $HOSTNAME
+ version_added: "0.9"
+ ssh_key_passphrase:
+ description:
+ - Set a passphrase for the SSH key.
+ - If no passphrase is provided, the SSH key will default to having no passphrase.
+ type: str
+ version_added: "0.9"
+ update_password:
+ description:
+ - C(always) will update passwords if they differ.
+ - C(on_create) will only set the password for newly created users.
+ type: str
+ choices: [ always, on_create ]
+ default: always
+ version_added: "1.3"
+ expires:
+ description:
+ - An expiry time for the user in epoch seconds; it will be ignored on platforms that do not support this.
+ - Currently supported on GNU/Linux, FreeBSD, and DragonFlyBSD.
+ - Since Ansible 2.6 you can remove the expiry time by specifying a negative value.
+ Currently supported on GNU/Linux and FreeBSD.
+ type: float
+ version_added: "1.9"
+ password_lock:
+ description:
+ - Lock the password (C(usermod -L), C(usermod -U), C(pw lock)).
+ - Implementation differs by platform. This option does not always mean the user cannot log in using other methods.
+ - This option does not disable the user, only lock the password.
+ - This must be set to C(false) in order to unlock a currently locked password. The absence of this parameter will not unlock a password.
+ - Currently supported on Linux, FreeBSD, DragonFlyBSD, NetBSD, OpenBSD.
+ type: bool
+ version_added: "2.6"
+ local:
+ description:
+ - Forces the use of "local" command alternatives on platforms that implement it.
+ - This is useful in environments that use centralized authentication when you want to manipulate the local users
+ (in other words, it uses C(luseradd) instead of C(useradd)).
+ - This will check C(/etc/passwd) for an existing account before invoking commands. If the local account database
+ exists somewhere other than C(/etc/passwd), this setting will not work properly.
+ - This requires that the above commands, as well as C(/etc/passwd), exist on the target host; otherwise it is a fatal error.
+ type: bool
+ default: no
+ version_added: "2.4"
+ profile:
+ description:
+ - Sets the profile of the user.
+ - Does nothing when used with other platforms.
+ - Can set multiple profiles using comma separation.
+ - To delete all the profiles, use C(profile='').
+ - Currently supported on Illumos/Solaris.
+ type: str
+ version_added: "2.8"
+ authorization:
+ description:
+ - Sets the authorization of the user.
+ - Does nothing when used with other platforms.
+ - Can set multiple authorizations using comma separation.
+ - To delete all authorizations, use C(authorization='').
+ - Currently supported on Illumos/Solaris.
+ type: str
+ version_added: "2.8"
+ role:
+ description:
+ - Sets the role of the user.
+ - Does nothing when used with other platforms.
+ - Can set multiple roles using comma separation.
+ - To delete all roles, use C(role='').
+ - Currently supported on Illumos/Solaris.
+ type: str
+ version_added: "2.8"
+ password_expire_max:
+ description:
+ - Maximum number of days between password changes.
+ - Supported on Linux only.
+ type: int
+ version_added: "2.11"
+ password_expire_min:
+ description:
+ - Minimum number of days between password changes.
+ - Supported on Linux only.
+ type: int
+ version_added: "2.11"
+ umask:
+ description:
+ - Sets the umask of the user.
+ - Does nothing when used with other platforms.
+ - Currently supported on Linux.
+ - Requires that C(local) is omitted or C(false).
+ type: str
+ version_added: "2.12"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - There are specific requirements per platform on user management utilities. However,
+ they generally come pre-installed with the system, and Ansible requires that they
+ are present at runtime. If they are not, a descriptive error message will be shown.
+ - On SunOS platforms, the shadow file is backed up automatically since this module edits it directly.
+ On other platforms, the shadow file is backed up by the underlying tools used by this module.
+ - On macOS, this module uses C(dscl) to create, modify, and delete accounts. C(dseditgroup) is used to
+ modify group membership. Accounts are hidden from the login window by modifying
+ C(/Library/Preferences/com.apple.loginwindow.plist).
+ - On FreeBSD, this module uses C(pw useradd) and C(chpass) to create, C(pw usermod) and C(chpass) to modify,
+ C(pw userdel) to remove, C(pw lock) to lock, and C(pw unlock) to unlock accounts.
+ - On all other platforms, this module uses C(useradd) to create, C(usermod) to modify, and
+ C(userdel) to remove accounts.
+seealso:
+- module: ansible.posix.authorized_key
+- module: ansible.builtin.group
+- module: ansible.windows.win_user
+author:
+- Stephen Fromm (@sfromm)
+'''
+
+EXAMPLES = r'''
+- name: Add the user 'johnd' with a specific uid and a primary group of 'admin'
+ ansible.builtin.user:
+ name: johnd
+ comment: John Doe
+ uid: 1040
+ group: admin
+
+- name: Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
+ ansible.builtin.user:
+ name: james
+ shell: /bin/bash
+ groups: admins,developers
+ append: yes
+
+- name: Remove the user 'johnd'
+ ansible.builtin.user:
+ name: johnd
+ state: absent
+ remove: yes
+
+- name: Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
+ ansible.builtin.user:
+ name: jsmith
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_file: .ssh/id_rsa
+
+- name: Add a consultant whose account you want to expire
+ ansible.builtin.user:
+ name: james18
+ shell: /bin/zsh
+ groups: developers
+ expires: 1422403387
+
+- name: Modify the user and remove the expiry time (Ansible 2.6 and later)
+ ansible.builtin.user:
+ name: james18
+ expires: -1
+
+- name: Set maximum expiration date for password
+ ansible.builtin.user:
+ name: ram19
+ password_expire_max: 10
+
+- name: Set minimum expiration date for password
+ ansible.builtin.user:
+ name: pushkar15
+ password_expire_min: 5
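+
+# Not part of the original examples: a minimal sketch of one way to supply a
+# hashed password via the password_hash filter (see the FAQ entry referenced
+# above). The user name and plain-text value here are illustrative only.
+- name: Add the user 'alice' with a hashed password (illustrative)
+ ansible.builtin.user:
+ name: alice
+ password: "{{ 'secret' | password_hash('sha512') }}"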
+'''
+
+RETURN = r'''
+append:
+ description: Whether or not to append the user to groups.
+ returned: When I(state) is C(present) and the user exists
+ type: bool
+ sample: True
+comment:
+ description: Comment section from passwd file, usually the user name.
+ returned: When user exists
+ type: str
+ sample: Agent Smith
+create_home:
+ description: Whether or not to create the home directory.
+ returned: When user does not exist and not check mode
+ type: bool
+ sample: True
+force:
+ description: Whether or not a user account was forcibly deleted.
+ returned: When I(state) is C(absent) and user exists
+ type: bool
+ sample: False
+group:
+ description: Primary user group ID.
+ returned: When user exists
+ type: int
+ sample: 1001
+groups:
+ description: List of groups of which the user is a member.
+ returned: When I(groups) is not empty and I(state) is C(present)
+ type: str
+ sample: 'chrony,apache'
+home:
+ description: "Path to user's home directory."
+ returned: When I(state) is C(present)
+ type: str
+ sample: '/home/asmith'
+move_home:
+ description: Whether or not to move an existing home directory.
+ returned: When I(state) is C(present) and user exists
+ type: bool
+ sample: False
+name:
+ description: User account name.
+ returned: always
+ type: str
+ sample: asmith
+password:
+ description: Masked value of the password.
+ returned: When I(state) is C(present) and I(password) is not empty
+ type: str
+ sample: 'NOT_LOGGING_PASSWORD'
+remove:
+ description: Whether or not to remove the user account.
+ returned: When I(state) is C(absent) and user exists
+ type: bool
+ sample: True
+shell:
+ description: User login shell.
+ returned: When I(state) is C(present)
+ type: str
+ sample: '/bin/bash'
+ssh_fingerprint:
+ description: Fingerprint of generated SSH key.
+ returned: When I(generate_ssh_key) is C(True)
+ type: str
+ sample: '2048 SHA256:aYNHYcyVm87Igh0IMEDMbvW0QDlRQfE0aJugp684ko8 ansible-generated on host (RSA)'
+ssh_key_file:
+ description: Path to generated SSH private key file.
+ returned: When I(generate_ssh_key) is C(True)
+ type: str
+ sample: /home/asmith/.ssh/id_rsa
+ssh_public_key:
+ description: Generated SSH public key file.
+ returned: When I(generate_ssh_key) is C(True)
+ type: str
+ sample: >
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC95opt4SPEC06tOYsJQJIuN23BbLMGmYo8ysVZQc4h2DZE9ugbjWWGS1/pweUGjVstgzMkBEeBCByaEf/RJKNecKRPeGd2Bw9DCj/bn5Z6rGfNENKBmo
+ 618mUJBvdlEgea96QGjOwSB7/gmonduC7gsWDMNcOdSE3wJMTim4lddiBx4RgC9yXsJ6Tkz9BHD73MXPpT5ETnse+A3fw3IGVSjaueVnlUyUmOBf7fzmZbhlFVXf2Zi2rFTXqvbdGHKkzpw1U8eB8xFPP7y
+ d5u1u0e6Acju/8aZ/l17IDFiLke5IzlqIMRTEbDwLNeO84YQKWTm9fODHzhYe0yvxqLiK07 ansible-generated on host'
+stderr:
+ description: Standard error from running commands.
+ returned: When stderr is returned by a command that is run
+ type: str
+ sample: Group wheels does not exist
+stdout:
+ description: Standard output from running commands.
+ returned: When standard output is returned by the command that is run
+ type: str
+ sample:
+system:
+ description: Whether or not the account is a system account.
+ returned: When I(system) is passed to the module and the account does not exist
+ type: bool
+ sample: True
+uid:
+ description: User ID of the user account.
+ returned: When I(uid) is passed to the module
+ type: int
+ sample: 1044
+password_expire_max:
+ description: Maximum number of days during which a password is valid.
+ returned: When user exists
+ type: int
+ sample: 20
+password_expire_min:
+ description: Minimum number of days between password changes.
+ returned: When user exists
+ type: int
+ sample: 20
+'''
+
+
+import ctypes
+import ctypes.util
+import errno
+import grp
+import calendar
+import os
+import re
+import pty
+import pwd
+import select
+import shutil
+import socket
+import subprocess
+import time
+import math
+
+from ansible.module_utils import distro
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.sys_info import get_platform_subclass
+import ansible.module_utils.compat.typing as t
+
+
+class StructSpwdType(ctypes.Structure):
+ _fields_ = [
+ ('sp_namp', ctypes.c_char_p),
+ ('sp_pwdp', ctypes.c_char_p),
+ ('sp_lstchg', ctypes.c_long),
+ ('sp_min', ctypes.c_long),
+ ('sp_max', ctypes.c_long),
+ ('sp_warn', ctypes.c_long),
+ ('sp_inact', ctypes.c_long),
+ ('sp_expire', ctypes.c_long),
+ ('sp_flag', ctypes.c_ulong),
+ ]
+
+
+try:
+ _LIBC = ctypes.cdll.LoadLibrary(
+ t.cast(
+ str,
+ ctypes.util.find_library('c')
+ )
+ )
+ _LIBC.getspnam.argtypes = (ctypes.c_char_p,)
+ _LIBC.getspnam.restype = ctypes.POINTER(StructSpwdType)
+ HAVE_SPWD = True
+except AttributeError:
+ HAVE_SPWD = False
+
+
+_HASH_RE = re.compile(r'[^a-zA-Z0-9./=]')
+
+
+def getspnam(b_name):
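+ # Illustrative usage (assumes privileges to read the shadow database and an
+ # existing account; .contents raises ValueError on a NULL result, which
+ # callers below catch):
+ # getspnam(to_bytes('root')).sp_max # maximum password age in days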
+ return _LIBC.getspnam(b_name).contents
+
+
+class User(object):
+ """
+ This is a generic User manipulation class that is subclassed
+ based on platform.
+
+ A subclass may wish to override the following action methods:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ - ssh_key_gen()
+ - ssh_key_fingerprint()
+ - user_exists()
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+ platform = 'Generic'
+ distribution = None # type: str | None
+ PASSWORDFILE = '/etc/passwd'
+ SHADOWFILE = '/etc/shadow' # type: str | None
+ SHADOWFILE_EXPIRE_INDEX = 7
+ LOGIN_DEFS = '/etc/login.defs'
+ DATE_FORMAT = '%Y-%m-%d'
+
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(User)
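+ # e.g. on a FreeBSD target this resolves to FreeBsdUser, so User(module)
+ # transparently returns the platform-specific implementation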
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.name = module.params['name']
+ self.uid = module.params['uid']
+ self.hidden = module.params['hidden']
+ self.non_unique = module.params['non_unique']
+ self.seuser = module.params['seuser']
+ self.group = module.params['group']
+ self.comment = module.params['comment']
+ self.shell = module.params['shell']
+ self.password = module.params['password']
+ self.force = module.params['force']
+ self.remove = module.params['remove']
+ self.create_home = module.params['create_home']
+ self.move_home = module.params['move_home']
+ self.skeleton = module.params['skeleton']
+ self.system = module.params['system']
+ self.login_class = module.params['login_class']
+ self.append = module.params['append']
+ self.sshkeygen = module.params['generate_ssh_key']
+ self.ssh_bits = module.params['ssh_key_bits']
+ self.ssh_type = module.params['ssh_key_type']
+ self.ssh_comment = module.params['ssh_key_comment']
+ self.ssh_passphrase = module.params['ssh_key_passphrase']
+ self.update_password = module.params['update_password']
+ self.home = module.params['home']
+ self.expires = None
+ self.password_lock = module.params['password_lock']
+ self.groups = None
+ self.local = module.params['local']
+ self.profile = module.params['profile']
+ self.authorization = module.params['authorization']
+ self.role = module.params['role']
+ self.password_expire_max = module.params['password_expire_max']
+ self.password_expire_min = module.params['password_expire_min']
+ self.umask = module.params['umask']
+
+ if self.umask is not None and self.local:
+ module.fail_json(msg="'umask' can not be used with 'local'")
+
+ if module.params['groups'] is not None:
+ self.groups = ','.join(module.params['groups'])
+
+ if module.params['expires'] is not None:
+ try:
+ self.expires = time.gmtime(module.params['expires'])
+ except Exception as e:
+ module.fail_json(msg="Invalid value for 'expires' %s: %s" % (self.expires, to_native(e)))
+
+ if module.params['ssh_key_file'] is not None:
+ self.ssh_file = module.params['ssh_key_file']
+ else:
+ self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
+
+ if self.groups is None and self.append:
+ # Change the argument_spec in 2.14 and remove this warning
+ # required_by={'append': ['groups']}
+ module.warn("'append' is set, but no 'groups' are specified. Use 'groups' for appending new groups."
+ "This will change to an error in Ansible 2.14.")
+
+ def check_password_encrypted(self):
+ # Darwin needs cleartext password, so skip validation
+ if self.module.params['password'] and self.platform != 'Darwin':
+ maybe_invalid = False
+
+ # Allow setting certain passwords in order to disable the account
+ if self.module.params['password'] in set(['*', '!', '*************']):
+ maybe_invalid = False
+ else:
+ # : for delimiter, * for disable user, ! for lock user
+ # these characters are invalid in the password
+ if any(char in self.module.params['password'] for char in ':*!'):
+ maybe_invalid = True
+ if '$' not in self.module.params['password']:
+ maybe_invalid = True
+ else:
+ fields = self.module.params['password'].split("$")
+ if len(fields) >= 3:
+ # contains character outside the crypto constraint
+ if bool(_HASH_RE.search(fields[-1])):
+ maybe_invalid = True
+ # md5
+ if fields[1] == '1' and len(fields[-1]) != 22:
+ maybe_invalid = True
+ # sha256
+ if fields[1] == '5' and len(fields[-1]) != 43:
+ maybe_invalid = True
+ # sha512
+ if fields[1] == '6' and len(fields[-1]) != 86:
+ maybe_invalid = True
+ else:
+ maybe_invalid = True
+ if maybe_invalid:
+ self.module.warn("The input password appears not to have been hashed. "
+ "The 'password' argument must be encrypted for this module to work properly.")
+
+ def execute_command(self, cmd, use_unsafe_shell=False, data=None, obey_checkmode=True):
+ if self.module.check_mode and obey_checkmode:
+ self.module.debug('In check mode, would have run: "%s"' % cmd)
+ return (0, '', '')
+ else:
+ # cast all args to strings; see ansible-modules-core/issues/4397
+ cmd = [str(x) for x in cmd]
+ return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
+
+ def backup_shadow(self):
+ if not self.module.check_mode and self.SHADOWFILE:
+ return self.module.backup_local(self.SHADOWFILE)
+
+ def remove_user_userdel(self):
+ if self.local:
+ command_name = 'luserdel'
+ else:
+ command_name = 'userdel'
+
+ cmd = [self.module.get_bin_path(command_name, True)]
+ if self.force and not self.local:
+ cmd.append('-f')
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def create_user_useradd(self):
+
+ if self.local:
+ command_name = 'luseradd'
+ lgroupmod_cmd = self.module.get_bin_path('lgroupmod', True)
+ lchage_cmd = self.module.get_bin_path('lchage', True)
+ else:
+ command_name = 'useradd'
+
+ cmd = [self.module.get_bin_path(command_name, True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.seuser is not None:
+ cmd.append('-Z')
+ cmd.append(self.seuser)
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+ elif self.group_exists(self.name):
+ # use the -N option (no user group) if a group already
+ # exists with the same name as the user to prevent
+ # errors from useradd trying to create a group when
+ # USERGROUPS_ENAB is set in /etc/login.defs.
+ if self.local:
+ # luseradd uses -n instead of -N
+ cmd.append('-n')
+ else:
+ if os.path.exists('/etc/redhat-release'):
+ dist = distro.version()
+ major_release = int(dist.split('.')[0])
+ if major_release <= 5:
+ cmd.append('-n')
+ else:
+ cmd.append('-N')
+ elif os.path.exists('/etc/SuSE-release'):
+ # -N did not exist in useradd before SLE 11 and did not
+ # automatically create a group
+ dist = distro.version()
+ major_release = int(dist.split('.')[0])
+ if major_release >= 12:
+ cmd.append('-N')
+ else:
+ cmd.append('-N')
+
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ if not self.local:
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ # If the specified path to the user home contains parent directories that
+ # do not exist and create_home is True first create the parent directory
+ # since useradd cannot create it.
+ if self.create_home:
+ parent = os.path.dirname(self.home)
+ if not os.path.isdir(parent):
+ self.create_homedir(self.home)
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.expires is not None and not self.local:
+ cmd.append('-e')
+ if self.expires < time.gmtime(0):
+ cmd.append('')
+ else:
+ cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
+
+ if self.password is not None:
+ cmd.append('-p')
+ if self.password_lock:
+ cmd.append('!%s' % self.password)
+ else:
+ cmd.append(self.password)
+
+ if self.create_home:
+ if not self.local:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+ else:
+ cmd.append('-M')
+
+ if self.system:
+ cmd.append('-r')
+
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+ if not self.local or rc != 0:
+ return (rc, out, err)
+
+ if self.expires is not None:
+ if self.expires < time.gmtime(0):
+ lexpires = -1
+ else:
+ # Convert seconds since Epoch to days since Epoch
+ lexpires = int(math.floor(self.module.params['expires'])) // 86400
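+ # e.g. expires=1422403387 (seconds) -> 16463 days since Epoch,
+ # the day-granular value that 'lchage -E' expects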
+ (rc, _out, _err) = self.execute_command([lchage_cmd, '-E', to_native(lexpires), self.name])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+
+ if self.groups is None or len(self.groups) == 0:
+ return (rc, out, err)
+
+ for add_group in groups:
+ (rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-M', self.name, add_group])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+ return (rc, out, err)
+
+ def _check_usermod_append(self):
+ # check if this version of usermod can append groups
+
+ if self.local:
+ command_name = 'lusermod'
+ else:
+ command_name = 'usermod'
+
+ usermod_path = self.module.get_bin_path(command_name, True)
+
+ # for some reason, usermod --help cannot be used by non-root
+ # on RH/Fedora, due to lack of execute bit for others
+ if not os.access(usermod_path, os.X_OK):
+ return False
+
+ cmd = [usermod_path, '--help']
+ (rc, data1, data2) = self.execute_command(cmd, obey_checkmode=False)
+ helpout = data1 + data2
+
+ # check if --append exists
+ lines = to_native(helpout).split('\n')
+ for line in lines:
+ if line.strip().startswith('-a, --append'):
+ return True
+
+ return False
+
+ def modify_user_usermod(self):
+
+ if self.local:
+ command_name = 'lusermod'
+ lgroupmod_cmd = self.module.get_bin_path('lgroupmod', True)
+ lgroupmod_add = set()
+ lgroupmod_del = set()
+ lchage_cmd = self.module.get_bin_path('lchage', True)
+ lexpires = None
+ else:
+ command_name = 'usermod'
+
+ cmd = [self.module.get_bin_path(command_name, True)]
+ info = self.user_info()
+ has_append = self._check_usermod_append()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(ginfo[2])
+
+ if self.groups is not None:
+ # get a list of all groups for the user, including the primary
+ current_groups = self.user_group_membership(exclude_primary=False)
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set(remove_existing=False)
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ if has_append:
+ cmd.append('-a')
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ if self.local:
+ if self.append:
+ lgroupmod_add = set(groups).difference(current_groups)
+ lgroupmod_del = set()
+ else:
+ lgroupmod_add = set(groups).difference(current_groups)
+ lgroupmod_del = set(current_groups).difference(groups)
+ else:
+ if self.append and not has_append:
+ cmd.append('-A')
+ cmd.append(','.join(group_diff))
+ else:
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ cmd.append('-d')
+ cmd.append(self.home)
+ if self.move_home:
+ cmd.append('-m')
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.expires is not None:
+
+ current_expires = int(self.user_password()[1])
+
+ if self.expires < time.gmtime(0):
+ if current_expires >= 0:
+ if self.local:
+ lexpires = -1
+ else:
+ cmd.append('-e')
+ cmd.append('')
+ else:
+ # Convert days since Epoch to seconds since Epoch as struct_time
+ current_expire_date = time.gmtime(current_expires * 86400)
+
+ # Current expires is negative or we compare year, month, and day only
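+ # struct_time[:3] is (tm_year, tm_mon, tm_mday), so e.g.
+ # time.gmtime(0)[:3] == (1970, 1, 1)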
+ if current_expires < 0 or current_expire_date[:3] != self.expires[:3]:
+ if self.local:
+ # Convert seconds since Epoch to days since Epoch
+ lexpires = int(math.floor(self.module.params['expires'])) // 86400
+ else:
+ cmd.append('-e')
+ cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
+
+ # Lock if no password or unlocked, unlock only if locked
+ if self.password_lock and not info[1].startswith('!'):
+ cmd.append('-L')
+ elif self.password_lock is False and info[1].startswith('!'):
+ # usermod will refuse to unlock a user with no password, module shows 'changed' regardless
+ cmd.append('-U')
+
+ if self.update_password == 'always' and self.password is not None and info[1].lstrip('!') != self.password.lstrip('!'):
+ # Remove options that are mutually exclusive with -p
+ cmd = [c for c in cmd if c not in ['-U', '-L']]
+ cmd.append('-p')
+ if self.password_lock:
+ # Lock the account and set the hash in a single command
+ cmd.append('!%s' % self.password)
+ else:
+ cmd.append(self.password)
+
+ (rc, out, err) = (None, '', '')
+
+ # skip if no usermod changes to be made
+ if len(cmd) > 1:
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+
+ if not self.local or not (rc is None or rc == 0):
+ return (rc, out, err)
+
+ if lexpires is not None:
+ (rc, _out, _err) = self.execute_command([lchage_cmd, '-E', to_native(lexpires), self.name])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+
+ if len(lgroupmod_add) == 0 and len(lgroupmod_del) == 0:
+ return (rc, out, err)
+
+ for add_group in lgroupmod_add:
+ (rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-M', self.name, add_group])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+
+ for del_group in lgroupmod_del:
+ (rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-m', self.name, del_group])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+ return (rc, out, err)
+
+ def group_exists(self, group):
+ try:
+ # Try group as a gid first
+ grp.getgrgid(int(group))
+ return True
+ except (ValueError, KeyError):
+ try:
+ grp.getgrnam(group)
+ return True
+ except KeyError:
+ return False
+
+ def group_info(self, group):
+ if not self.group_exists(group):
+ return False
+ try:
+ # Try group as a gid first
+ return list(grp.getgrgid(int(group)))
+ except (ValueError, KeyError):
+ return list(grp.getgrnam(group))
+
+ def get_groups_set(self, remove_existing=True):
+ if self.groups is None:
+ return None
+ info = self.user_info()
+ groups = set(x.strip() for x in self.groups.split(',') if x)
+ for g in groups.copy():
+ if not self.group_exists(g):
+ self.module.fail_json(msg="Group %s does not exist" % (g))
+ if info and remove_existing and self.group_info(g)[2] == info[3]:
+ groups.remove(g)
+ return groups
+
+ def user_group_membership(self, exclude_primary=True):
+ ''' Return a list of groups the user belongs to '''
+ groups = []
+ info = self.get_pwd_info()
+ for group in grp.getgrall():
+ if self.name in group.gr_mem:
+ # Exclude the user's primary group by default
+ if not exclude_primary:
+ groups.append(group[0])
+ else:
+ if info[3] != group.gr_gid:
+ groups.append(group[0])
+
+ return groups
+
+ def user_exists(self):
+ # The pwd module does not distinguish between local and directory accounts.
+ # Its output cannot be used to determine whether or not an account exists locally.
+ # It returns True if the account exists locally or in the directory, so instead
+ # look in the local PASSWORD file for an existing account.
+ if self.local:
+ if not os.path.exists(self.PASSWORDFILE):
+ self.module.fail_json(msg="'local: true' specified but unable to find local account file {0} to parse.".format(self.PASSWORDFILE))
+
+ exists = False
+ name_test = '{0}:'.format(self.name)
+ with open(self.PASSWORDFILE, 'rb') as f:
+ reversed_lines = f.readlines()[::-1]
+ for line in reversed_lines:
+ if line.startswith(to_bytes(name_test)):
+ exists = True
+ break
+
+ if not exists:
+ self.module.warn(
+ "'local: true' specified and user '{name}' was not found in {file}. "
+ "The local user account may already exist if the local account database exists "
+ "somewhere other than {file}.".format(file=self.PASSWORDFILE, name=self.name))
+
+ return exists
+
+ else:
+ try:
+ if pwd.getpwnam(self.name):
+ return True
+ except KeyError:
+ return False
+
+ def get_pwd_info(self):
+ if not self.user_exists():
+ return False
+ return list(pwd.getpwnam(self.name))
+
+ def user_info(self):
+ if not self.user_exists():
+ return False
+ info = self.get_pwd_info()
+ if len(info[1]) == 1 or len(info[1]) == 0:
+ info[1] = self.user_password()[0]
+ return info
+
+ def set_password_expire(self):
+ min_needs_change = self.password_expire_min is not None
+ max_needs_change = self.password_expire_max is not None
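+ # equivalent to 'chage -m MIN -M MAX <name>'; when the shadow database
+ # already matches the requested values, the command is skipped below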
+
+ if HAVE_SPWD:
+ try:
+ shadow_info = getspnam(to_bytes(self.name))
+ except ValueError:
+ return None, '', ''
+
+ min_needs_change &= self.password_expire_min != shadow_info.sp_min
+ max_needs_change &= self.password_expire_max != shadow_info.sp_max
+
+ if not (min_needs_change or max_needs_change):
+ return (None, '', '') # target state already reached
+
+ command_name = 'chage'
+ cmd = [self.module.get_bin_path(command_name, True)]
+ if min_needs_change:
+ cmd.extend(["-m", self.password_expire_min])
+ if max_needs_change:
+ cmd.extend(["-M", self.password_expire_max])
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def user_password(self):
+ passwd = ''
+ expires = ''
+ if HAVE_SPWD:
+ try:
+ shadow_info = getspnam(to_bytes(self.name))
+ passwd = to_native(shadow_info.sp_pwdp)
+ expires = shadow_info.sp_expire
+ return passwd, expires
+ except ValueError:
+ return passwd, expires
+
+ if not self.user_exists():
+ return passwd, expires
+ elif self.SHADOWFILE:
+ passwd, expires = self.parse_shadow_file()
+
+ return passwd, expires
+
+ def parse_shadow_file(self):
+ passwd = ''
+ expires = ''
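+ # A shadow entry is ':'-separated, e.g. (illustrative):
+ # asmith:$6$salt$hash:18000:0:99999:7:::
+ # field 1 is the password hash; SHADOWFILE_EXPIRE_INDEX selects the
+ # account expiration field (7 here, 6 for BSD master.passwd)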
+ if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
+ with open(self.SHADOWFILE, 'r') as f:
+ for line in f:
+ if line.startswith('%s:' % self.name):
+ passwd = line.split(':')[1]
+ expires = line.split(':')[self.SHADOWFILE_EXPIRE_INDEX] or -1
+ return passwd, expires
+
+ def get_ssh_key_path(self):
+ info = self.user_info()
+ if os.path.isabs(self.ssh_file):
+ ssh_key_file = self.ssh_file
+ else:
+ if not os.path.exists(info[5]) and not self.module.check_mode:
+ raise Exception('User %s home directory does not exist' % self.name)
+ ssh_key_file = os.path.join(info[5], self.ssh_file)
+ return ssh_key_file
+
+ def ssh_key_gen(self):
+ info = self.user_info()
+ overwrite = None
+ try:
+ ssh_key_file = self.get_ssh_key_path()
+ except Exception as e:
+ return (1, '', to_native(e))
+ ssh_dir = os.path.dirname(ssh_key_file)
+ if not os.path.exists(ssh_dir):
+ if self.module.check_mode:
+ return (0, '', '')
+ try:
+ os.mkdir(ssh_dir, int('0700', 8))
+ os.chown(ssh_dir, info[2], info[3])
+ except OSError as e:
+ return (1, '', 'Failed to create %s: %s' % (ssh_dir, to_native(e)))
+ if os.path.exists(ssh_key_file):
+ if self.force:
+ # ssh-keygen doesn't support overwriting the key interactively, so send 'y' to confirm
+ overwrite = 'y'
+ else:
+ return (None, 'Key already exists, use "force: yes" to overwrite', '')
+ cmd = [self.module.get_bin_path('ssh-keygen', True)]
+ cmd.append('-t')
+ cmd.append(self.ssh_type)
+ if self.ssh_bits > 0:
+ cmd.append('-b')
+ cmd.append(self.ssh_bits)
+ cmd.append('-C')
+ cmd.append(self.ssh_comment)
+ cmd.append('-f')
+ cmd.append(ssh_key_file)
+ if self.ssh_passphrase is not None:
+ if self.module.check_mode:
+ self.module.debug('In check mode, would have run: "%s"' % cmd)
+ return (0, '', '')
+
+ master_in_fd, slave_in_fd = pty.openpty()
+ master_out_fd, slave_out_fd = pty.openpty()
+ master_err_fd, slave_err_fd = pty.openpty()
+ env = os.environ.copy()
+ env['LC_ALL'] = get_best_parsable_locale(self.module)
+ try:
+ p = subprocess.Popen([to_bytes(c) for c in cmd],
+ stdin=slave_in_fd,
+ stdout=slave_out_fd,
+ stderr=slave_err_fd,
+ preexec_fn=os.setsid,
+ env=env)
+ out_buffer = b''
+ err_buffer = b''
+ while p.poll() is None:
+ r_list = select.select([master_out_fd, master_err_fd], [], [], 1)[0]
+ first_prompt = b'Enter passphrase (empty for no passphrase):'
+ second_prompt = b'Enter same passphrase again'
+ prompt = first_prompt
+ for fd in r_list:
+ if fd == master_out_fd:
+ chunk = os.read(master_out_fd, 10240)
+ out_buffer += chunk
+ if prompt in out_buffer:
+ os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
+ prompt = second_prompt
+ else:
+ chunk = os.read(master_err_fd, 10240)
+ err_buffer += chunk
+ if prompt in err_buffer:
+ os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
+ prompt = second_prompt
+ if b'Overwrite (y/n)?' in out_buffer or b'Overwrite (y/n)?' in err_buffer:
+ # The key was created between us checking for existence and now
+ return (None, 'Key already exists', '')
+
+ rc = p.returncode
+ out = to_native(out_buffer)
+ err = to_native(err_buffer)
+ except OSError as e:
+ return (1, '', to_native(e))
+ else:
+ cmd.append('-N')
+ cmd.append('')
+
+ (rc, out, err) = self.execute_command(cmd, data=overwrite)
+
+ if rc == 0 and not self.module.check_mode:
+ # If the keys were successfully created, we should be able
+ # to tweak ownership.
+ os.chown(ssh_key_file, info[2], info[3])
+ os.chown('%s.pub' % ssh_key_file, info[2], info[3])
+ return (rc, out, err)
+
+ def ssh_key_fingerprint(self):
+ ssh_key_file = self.get_ssh_key_path()
+ if not os.path.exists(ssh_key_file):
+ return (1, 'SSH Key file %s does not exist' % ssh_key_file, '')
+ cmd = [self.module.get_bin_path('ssh-keygen', True)]
+ cmd.append('-l')
+ cmd.append('-f')
+ cmd.append(ssh_key_file)
+
+ return self.execute_command(cmd, obey_checkmode=False)
+
+ def get_ssh_public_key(self):
+ ssh_public_key_file = '%s.pub' % self.get_ssh_key_path()
+ try:
+ with open(ssh_public_key_file, 'r') as f:
+ ssh_public_key = f.read().strip()
+ except IOError:
+ return None
+ return ssh_public_key
+
+ def create_user(self):
+ # by default we use the create_user_useradd method
+ return self.create_user_useradd()
+
+ def remove_user(self):
+ # by default we use the remove_user_userdel method
+ return self.remove_user_userdel()
+
+ def modify_user(self):
+ # by default we use the modify_user_usermod method
+ return self.modify_user_usermod()
+
+ def create_homedir(self, path):
+ if not os.path.exists(path):
+ if self.skeleton is not None:
+ skeleton = self.skeleton
+ else:
+ skeleton = '/etc/skel'
+
+ if os.path.exists(skeleton):
+ try:
+ shutil.copytree(skeleton, path, symlinks=True)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+ else:
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+ # get umask from /etc/login.defs and set correct home mode
+ if os.path.exists(self.LOGIN_DEFS):
+ with open(self.LOGIN_DEFS, 'r') as f:
+ for line in f:
+ m = re.match(r'^UMASK\s+(\d+)$', line)
+ if m:
+ umask = int(m.group(1), 8)
+ mode = 0o777 & ~umask
+ try:
+ os.chmod(path, mode)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+
+ def chown_homedir(self, uid, gid, path):
+ try:
+ os.chown(path, uid, gid)
+ for root, dirs, files in os.walk(path):
+ for d in dirs:
+ os.chown(os.path.join(root, d), uid, gid)
+ for f in files:
+ os.chown(os.path.join(root, f), uid, gid)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+
+
+# ===========================================
+
+class FreeBsdUser(User):
+ """
+ This is a FreeBSD User manipulation class - it uses the pw command
+ to manipulate the user database, followed by the chpass command
+ to change the password.
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'FreeBSD'
+ distribution = None
+ SHADOWFILE = '/etc/master.passwd'
+ SHADOWFILE_EXPIRE_INDEX = 6
+ DATE_FORMAT = '%d-%b-%Y'
+
+ def _handle_lock(self):
+ info = self.user_info()
+ if self.password_lock and not info[1].startswith('*LOCKED*'):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'lock',
+ self.name
+ ]
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+ return self.execute_command(cmd)
+ elif self.password_lock is False and info[1].startswith('*LOCKED*'):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'unlock',
+ self.name
+ ]
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+ return self.execute_command(cmd)
+
+ return (None, '', '')
+
+ def remove_user(self):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'userdel',
+ '-n',
+ self.name
+ ]
+ if self.remove:
+ cmd.append('-r')
+
+ return self.execute_command(cmd)
+
+ def create_user(self):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'useradd',
+ '-n',
+ self.name,
+ ]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.expires is not None:
+ cmd.append('-e')
+ if self.expires < time.gmtime(0):
+ cmd.append('0')
+ else:
+ cmd.append(str(calendar.timegm(self.expires)))
+
+ # system cannot be handled currently - should we error if it's requested?
+ # create the user
+ (rc, out, err) = self.execute_command(cmd)
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ # we have to set the password in a second command
+ if self.password is not None:
+ cmd = [
+ self.module.get_bin_path('chpass', True),
+ '-p',
+ self.password,
+ self.name
+ ]
+ _rc, _out, _err = self.execute_command(cmd)
+ if rc is None:
+ rc = _rc
+ out += _out
+ err += _err
+
+ # we have to lock/unlock the password in a distinct command
+ _rc, _out, _err = self._handle_lock()
+ if rc is None:
+ rc = _rc
+ out += _out
+ err += _err
+
+ return (rc, out, err)
+
+ def modify_user(self):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'usermod',
+ '-n',
+ self.name
+ ]
+ cmd_len = len(cmd)
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ if (info[5] != self.home and self.move_home) or (not os.path.exists(self.home) and self.create_home):
+ cmd.append('-m')
+ if info[5] != self.home:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ # find current login class
+ user_login_class = None
+ if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
+ with open(self.SHADOWFILE, 'r') as f:
+ for line in f:
+ if line.startswith('%s:' % self.name):
+ user_login_class = line.split(':')[4]
+
+ # act only if login_class changed
+ if self.login_class != user_login_class:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups = self.get_groups_set()
+
+ group_diff = set(current_groups).symmetric_difference(groups)
+ groups_need_mod = False
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ new_groups = groups
+ if self.append:
+ new_groups = groups | set(current_groups)
+ cmd.append(','.join(new_groups))
+
+ if self.expires is not None:
+
+ current_expires = int(self.user_password()[1])
+
+ # If expiration is negative or zero and the current expiration is greater than zero, disable expiration.
+ # In FreeBSD, setting expiration to zero disables expiration. It does not expire the account.
+ if self.expires <= time.gmtime(0):
+ if current_expires > 0:
+ cmd.append('-e')
+ cmd.append('0')
+ else:
+ # Convert seconds since Epoch to struct_time
+ current_expire_date = time.gmtime(current_expires)
+
+ # Current expires is negative or we compare year, month, and day only
+ if current_expires <= 0 or current_expire_date[:3] != self.expires[:3]:
+ cmd.append('-e')
+ cmd.append(str(calendar.timegm(self.expires)))
+
+ (rc, out, err) = (None, '', '')
+
+ # modify the user if cmd will do anything
+ if cmd_len != len(cmd):
+ (rc, _out, _err) = self.execute_command(cmd)
+ out += _out
+ err += _err
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ # we have to set the password in a second command
+ if self.update_password == 'always' and self.password is not None and info[1].lstrip('*LOCKED*') != self.password.lstrip('*LOCKED*'):
+ cmd = [
+ self.module.get_bin_path('chpass', True),
+ '-p',
+ self.password,
+ self.name
+ ]
+ _rc, _out, _err = self.execute_command(cmd)
+ if rc is None:
+ rc = _rc
+ out += _out
+ err += _err
+
+ # we have to lock/unlock the password in a distinct command
+ _rc, _out, _err = self._handle_lock()
+ if rc is None:
+ rc = _rc
+ out += _out
+ err += _err
+
+ return (rc, out, err)
+
+
+class DragonFlyBsdUser(FreeBsdUser):
+ """
+ This is a DragonFlyBSD User manipulation class - it inherits the
+ FreeBsdUser class behaviors, such as using the pw command to
+ manipulate the user database, followed by the chpass command
+ to change the password.
+ """
+
+ platform = 'DragonFly'
+
+
+class OpenBSDUser(User):
+ """
+ This is an OpenBSD User manipulation class.
+ Main differences are that OpenBSD:-
+ - has no concept of "system" account.
+ - has no force delete user
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'OpenBSD'
+ distribution = None
+ SHADOWFILE = '/etc/master.passwd'
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('useradd', True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.password is not None and self.password != '*':
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def remove_user_userdel(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups_option = '-S'
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_option = '-G'
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append(groups_option)
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ # find current login class
+ user_login_class = None
+ userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
+ (rc, out, err) = self.execute_command(userinfo_cmd, obey_checkmode=False)
+
+ for line in out.splitlines():
+ tokens = line.split()
+
+ if tokens[0] == 'class' and len(tokens) == 2:
+ user_login_class = tokens[1]
+
+ # act only if login_class changed
+ if self.login_class != user_login_class:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.password_lock and not info[1].startswith('*'):
+ cmd.append('-Z')
+ elif self.password_lock is False and info[1].startswith('*'):
+ cmd.append('-U')
+
+ if self.update_password == 'always' and self.password is not None \
+ and self.password != '*' and info[1] != self.password:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ return (None, '', '')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+class NetBSDUser(User):
+ """
+ This is a NetBSD User manipulation class.
+ Main differences are that NetBSD:-
+ - has no concept of "system" account.
+ - has no force delete user
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'NetBSD'
+ distribution = None
+ SHADOWFILE = '/etc/master.passwd'
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('useradd', True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ if len(groups) > 16:
+ self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.password is not None:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def remove_user_userdel(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups = set(current_groups).union(groups)
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ if len(groups) > 16:
+ self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.password_lock and not info[1].startswith('*LOCKED*'):
+ cmd.append('-C yes')
+ elif self.password_lock is False and info[1].startswith('*LOCKED*'):
+ cmd.append('-C no')
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ return (None, '', '')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+class SunOS(User):
+ """
+ This is a SunOS User manipulation class - The main difference between
+ this class and the generic user class is that Solaris-type distros
+ don't support the concept of a "system" account and we need to
+ edit the /etc/shadow file manually to set a password. (Ugh)
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ - user_info()
+ """
+
+ platform = 'SunOS'
+ distribution = None
+ SHADOWFILE = '/etc/shadow'
+ USER_ATTR = '/etc/user_attr'
+
+ def get_password_defaults(self):
+ # Read password aging defaults
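+ # e.g. a line 'MAXWEEKS=4' yields maxweeks='4'; callers convert weeks to
+ # the day-granular shadow fields via int('4') * 7 == 28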
+ try:
+ minweeks = ''
+ maxweeks = ''
+ warnweeks = ''
+ with open("/etc/default/passwd", 'r') as f:
+ for line in f:
+ line = line.strip()
+ if (line.startswith('#') or line == ''):
+ continue
+ m = re.match(r'^([^#]*)#(.*)$', line)
+ if m: # The line contains a hash / comment
+ line = m.group(1)
+ key, value = line.split('=')
+ if key == "MINWEEKS":
+ minweeks = value.rstrip('\n')
+ elif key == "MAXWEEKS":
+ maxweeks = value.rstrip('\n')
+ elif key == "WARNWEEKS":
+ warnweeks = value.rstrip('\n')
+ except Exception as err:
+ self.module.fail_json(msg="failed to read /etc/default/passwd: %s" % to_native(err))
+
+ return (minweeks, maxweeks, warnweeks)
+
+ def remove_user(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('useradd', True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ if self.profile is not None:
+ cmd.append('-P')
+ cmd.append(self.profile)
+
+ if self.authorization is not None:
+ cmd.append('-A')
+ cmd.append(self.authorization)
+
+ if self.role is not None:
+ cmd.append('-R')
+ cmd.append(self.role)
+
+ cmd.append(self.name)
+
+ (rc, out, err) = self.execute_command(cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ if not self.module.check_mode:
+ # we have to set the password by editing the /etc/shadow file
+ if self.password is not None:
+ self.backup_shadow()
+ minweeks, maxweeks, warnweeks = self.get_password_defaults()
+ try:
+ lines = []
+ with open(self.SHADOWFILE, 'rb') as f:
+ for line in f:
+ line = to_native(line, errors='surrogate_or_strict')
+ fields = line.strip().split(':')
+ if not fields[0] == self.name:
+ lines.append(line)
+ continue
+ fields[1] = self.password
+ fields[2] = str(int(time.time() // 86400))
+ if minweeks:
+ try:
+ fields[3] = str(int(minweeks) * 7)
+ except ValueError:
+ # mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
+ pass
+ if maxweeks:
+ try:
+ fields[4] = str(int(maxweeks) * 7)
+ except ValueError:
+ # mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
+ pass
+ if warnweeks:
+ try:
+ fields[5] = str(int(warnweeks) * 7)
+ except ValueError:
+ # mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
+ pass
+ line = ':'.join(fields)
+ lines.append('%s\n' % line)
+ with open(self.SHADOWFILE, 'w+') as f:
+ f.writelines(lines)
+ except Exception as err:
+ self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
+
+ return (rc, out, err)
+
+ def modify_user_usermod(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ cmd_len = len(cmd)
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+ groups_need_mod = False
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ new_groups = groups
+ if self.append:
+ new_groups.update(current_groups)
+ cmd.append(','.join(new_groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.profile is not None and info[7] != self.profile:
+ cmd.append('-P')
+ cmd.append(self.profile)
+
+ if self.authorization is not None and info[8] != self.authorization:
+ cmd.append('-A')
+ cmd.append(self.authorization)
+
+ if self.role is not None and info[9] != self.role:
+ cmd.append('-R')
+ cmd.append(self.role)
+
+ # modify the user if cmd will do anything
+ if cmd_len != len(cmd):
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+ else:
+ (rc, out, err) = (None, '', '')
+
+ # we have to set the password by editing the /etc/shadow file
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ self.backup_shadow()
+ (rc, out, err) = (0, '', '')
+ if not self.module.check_mode:
+ minweeks, maxweeks, warnweeks = self.get_password_defaults()
+ try:
+ lines = []
+ with open(self.SHADOWFILE, 'rb') as f:
+ for line in f:
+ line = to_native(line, errors='surrogate_or_strict')
+ fields = line.strip().split(':')
+ if not fields[0] == self.name:
+ lines.append(line)
+ continue
+ fields[1] = self.password
+ fields[2] = str(int(time.time() // 86400))
+ if minweeks:
+ fields[3] = str(int(minweeks) * 7)
+ if maxweeks:
+ fields[4] = str(int(maxweeks) * 7)
+ if warnweeks:
+ fields[5] = str(int(warnweeks) * 7)
+ line = ':'.join(fields)
+ lines.append('%s\n' % line)
+ with open(self.SHADOWFILE, 'w+') as f:
+ f.writelines(lines)
+ rc = 0
+ except Exception as err:
+ self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
+
+ return (rc, out, err)
+
+ def user_info(self):
+ info = super(SunOS, self).user_info()
+ if info:
+ info += self._user_attr_info()
+ return info
+
+ def _user_attr_info(self):
+ info = [''] * 3
+ with open(self.USER_ATTR, 'r') as file_handler:
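+            # user_attr(4) lines look like (illustrative):
+            #   jdoe::::profiles=System Administrator;roles=oper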
+ for line in file_handler:
+ lines = line.strip().split('::::')
+ if lines[0] == self.name:
+ tmp = dict(x.split('=') for x in lines[1].split(';'))
+ info[0] = tmp.get('profiles', '')
+ info[1] = tmp.get('auths', '')
+ info[2] = tmp.get('roles', '')
+ return info
+
+
+class DarwinUser(User):
+ """
+ This is a Darwin macOS User manipulation class.
+ Main differences are that Darwin:-
+ - Handles accounts in a database managed by dscl(1)
+ - Has no useradd/groupadd
+ - Does not create home directories
+ - User password must be cleartext
+ - UID must be given
+    - System users must have a UID below 500
+
+ This overrides the following methods from the generic class:-
+ - user_exists()
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+ platform = 'Darwin'
+ distribution = None
+ SHADOWFILE = None
+
+ dscl_directory = '.'
+
+ fields = [
+ ('comment', 'RealName'),
+ ('home', 'NFSHomeDirectory'),
+ ('shell', 'UserShell'),
+ ('uid', 'UniqueID'),
+ ('group', 'PrimaryGroupID'),
+ ('hidden', 'IsHidden'),
+ ]
+
+ def __init__(self, module):
+
+ super(DarwinUser, self).__init__(module)
+
+        # make the user hidden if the option is set, or defer to the system option
+ if self.hidden is None:
+ if self.system:
+ self.hidden = 1
+ elif self.hidden:
+ self.hidden = 1
+ else:
+ self.hidden = 0
+
+ # add hidden to processing if set
+ if self.hidden is not None:
+ self.fields.append(('hidden', 'IsHidden'))
+
+ def _get_dscl(self):
+ return [self.module.get_bin_path('dscl', True), self.dscl_directory]
+
+ def _list_user_groups(self):
+ cmd = self._get_dscl()
+ cmd += ['-search', '/Groups', 'GroupMembership', self.name]
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ groups = []
+ for line in out.splitlines():
+ if line.startswith(' ') or line.startswith(')'):
+ continue
+ groups.append(line.split()[0])
+ return groups
+
+ def _get_user_property(self, property):
+        '''Return user PROPERTY as given by dscl(1) -read, or None if not found.'''
+ cmd = self._get_dscl()
+ cmd += ['-read', '/Users/%s' % self.name, property]
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ if rc != 0:
+ return None
+ # from dscl(1)
+ # if property contains embedded spaces, the list will instead be
+ # displayed one entry per line, starting on the line after the key.
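+        # e.g. `dscl . -read /Users/jdoe UserShell` prints a single line
+        # "UserShell: /bin/zsh" (illustrative).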
+ lines = out.splitlines()
+ if len(lines) == 1:
+ return lines[0].split(': ')[1]
+ if len(lines) > 2:
+ return '\n'.join([lines[1].strip()] + lines[2:])
+ if len(lines) == 2:
+ return lines[1].strip()
+ return None
+
+ def _get_next_uid(self, system=None):
+ '''
+        Return the next available uid. If system=True, the
+        uid should be below 500, if possible.
+ '''
+ cmd = self._get_dscl()
+ cmd += ['-list', '/Users', 'UniqueID']
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ if rc != 0:
+ self.module.fail_json(
+ msg="Unable to get the next available uid",
+ rc=rc,
+ out=out,
+ err=err
+ )
+
+ max_uid = 0
+ max_system_uid = 0
+ for line in out.splitlines():
+ current_uid = int(line.split(' ')[-1])
+ if max_uid < current_uid:
+ max_uid = current_uid
+ if max_system_uid < current_uid and current_uid < 500:
+ max_system_uid = current_uid
+
+ if system and (0 < max_system_uid < 499):
+ return max_system_uid + 1
+ return max_uid + 1
+
+ def _change_user_password(self):
+        '''Change the password for SELF.NAME to SELF.PASSWORD.
+
+ Please note that password must be cleartext.
+ '''
+        # some documentation on how passwords are stored on OSX:
+ # http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
+ # http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
+ # http://pastebin.com/RYqxi7Ca
+ # on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
+ # https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
+ # https://gist.github.com/nueh/8252572
+ cmd = self._get_dscl()
+ if self.password:
+ cmd += ['-passwd', '/Users/%s' % self.name, self.password]
+ else:
+ cmd += ['-create', '/Users/%s' % self.name, 'Password', '*']
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Error when changing password', err=err, out=out, rc=rc)
+ return (rc, out, err)
+
+ def _make_group_numerical(self):
+        '''Convert SELF.GROUP to its numerical GID, as a string suitable for dscl.'''
+ if self.group is None:
+ self.group = 'nogroup'
+ try:
+ self.group = grp.getgrnam(self.group).gr_gid
+ except KeyError:
+ self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
+ # We need to pass a string to dscl
+ self.group = str(self.group)
+
+ def __modify_group(self, group, action):
+ '''Add or remove SELF.NAME to or from GROUP depending on ACTION.
+ ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. '''
+ if action == 'add':
+ option = '-a'
+ else:
+ option = '-d'
+ cmd = ['dseditgroup', '-o', 'edit', option, self.name, '-t', 'user', group]
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
+ % (action, self.name, group), err=err, out=out, rc=rc)
+ return (rc, out, err)
+
+ def _modify_group(self):
+        '''Reconcile SELF.NAME's group membership with SELF.GROUPS,
+        adding missing groups and, unless SELF.APPEND is set, removing extra ones.'''
+
+ rc = 0
+ out = ''
+ err = ''
+ changed = False
+
+ current = set(self._list_user_groups())
+ if self.groups is not None:
+ target = set(self.groups.split(','))
+ else:
+ target = set([])
+
+ if self.append is False:
+ for remove in current - target:
+ (_rc, _out, _err) = self.__modify_group(remove, 'delete')
+                rc += _rc
+ out += _out
+ err += _err
+ changed = True
+
+ for add in target - current:
+ (_rc, _out, _err) = self.__modify_group(add, 'add')
+ rc += _rc
+ out += _out
+ err += _err
+ changed = True
+
+ return (rc, out, err, changed)
+
+ def _update_system_user(self):
+        '''Hide or show user on login window according to SELF.SYSTEM.
+
+ Returns 0 if a change has been made, None otherwise.'''
+
+ plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
+
+ # http://support.apple.com/kb/HT5017?viewlocale=en_US
+ cmd = ['defaults', 'read', plist_file, 'HiddenUsersList']
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ # returned value is
+ # (
+ # "_userA",
+ # "_UserB",
+ # userc
+ # )
+ hidden_users = []
+ for x in out.splitlines()[1:-1]:
+ try:
+ x = x.split('"')[1]
+ except IndexError:
+ x = x.strip()
+ hidden_users.append(x)
+
+ if self.system:
+ if self.name not in hidden_users:
+ cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array-add', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+                    self.module.fail_json(msg='Cannot add user "%s" to hidden user list.' % self.name, err=err, out=out, rc=rc)
+ return 0
+ else:
+ if self.name in hidden_users:
+ del (hidden_users[hidden_users.index(self.name)])
+
+ cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array'] + hidden_users
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Cannot remove user "%s" from hidden user list.' % self.name, err=err, out=out, rc=rc)
+ return 0
+
+ def user_exists(self):
+        '''Check if SELF.NAME is a known user on the system.'''
+ cmd = self._get_dscl()
+ cmd += ['-read', '/Users/%s' % self.name, 'UniqueID']
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ return rc == 0
+
+ def remove_user(self):
+ '''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
+ info = self.user_info()
+
+ cmd = self._get_dscl()
+ cmd += ['-delete', '/Users/%s' % self.name]
+ (rc, out, err) = self.execute_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Cannot delete user "%s".' % self.name, err=err, out=out, rc=rc)
+
+ if self.force:
+ if os.path.exists(info[5]):
+ shutil.rmtree(info[5])
+ out += "Removed %s" % info[5]
+
+ return (rc, out, err)
+
+ def create_user(self, command_name='dscl'):
+ cmd = self._get_dscl()
+ cmd += ['-create', '/Users/%s' % self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Cannot create user "%s".' % self.name, err=err, out=out, rc=rc)
+
+ self._make_group_numerical()
+ if self.uid is None:
+ self.uid = str(self._get_next_uid(self.system))
+
+ # Homedir is not created by default
+ if self.create_home:
+ if self.home is None:
+ self.home = '/Users/%s' % self.name
+ if not self.module.check_mode:
+ if not os.path.exists(self.home):
+ os.makedirs(self.home)
+ self.chown_homedir(int(self.uid), int(self.group), self.home)
+
+ # dscl sets shell to /usr/bin/false when UserShell is not specified
+ # so set the shell to /bin/bash when the user is not a system user
+ if not self.system and self.shell is None:
+ self.shell = '/bin/bash'
+
+ for field in self.fields:
+ if field[0] in self.__dict__ and self.__dict__[field[0]]:
+
+ cmd = self._get_dscl()
+ cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
+ (rc, _out, _err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Cannot add property "%s" to user "%s".' % (field[0], self.name), err=err, out=out, rc=rc)
+
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, _out, _err)
+
+ (rc, _out, _err) = self._change_user_password()
+ out += _out
+ err += _err
+
+ self._update_system_user()
+ # here we don't care about change status since it is a creation,
+ # thus changed is always true.
+ if self.groups:
+ (rc, _out, _err, changed) = self._modify_group()
+ out += _out
+ err += _err
+ return (rc, out, err)
+
+ def modify_user(self):
+ changed = None
+ out = ''
+ err = ''
+
+ if self.group:
+ self._make_group_numerical()
+
+ for field in self.fields:
+ if field[0] in self.__dict__ and self.__dict__[field[0]]:
+ current = self._get_user_property(field[1])
+ if current is None or current != to_text(self.__dict__[field[0]]):
+ cmd = self._get_dscl()
+ cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
+ (rc, _out, _err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(
+ msg='Cannot update property "%s" for user "%s".'
+ % (field[0], self.name), err=err, out=out, rc=rc)
+ changed = rc
+ out += _out
+ err += _err
+ if self.update_password == 'always' and self.password is not None:
+ (rc, _out, _err) = self._change_user_password()
+ out += _out
+ err += _err
+ changed = rc
+
+ if self.groups:
+ (rc, _out, _err, _changed) = self._modify_group()
+ out += _out
+ err += _err
+
+ if _changed is True:
+ changed = rc
+
+ rc = self._update_system_user()
+ if rc == 0:
+ changed = rc
+
+ return (changed, out, err)
+
+
+class AIX(User):
+ """
+    This is an AIX User manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ - parse_shadow_file()
+ """
+
+ platform = 'AIX'
+ distribution = None
+ SHADOWFILE = '/etc/security/passwd'
+
+ def remove_user(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def create_user_useradd(self, command_name='useradd'):
+ cmd = [self.module.get_bin_path(command_name, True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+
+ # set password with chpasswd
+ if self.password is not None:
+ cmd = []
+ cmd.append(self.module.get_bin_path('chpasswd', True))
+ cmd.append('-e')
+ cmd.append('-c')
+ self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
+
+ return (rc, out, err)
+
+ def modify_user_usermod(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ (rc, out, err) = (None, '', '')
+ else:
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+
+ # set password with chpasswd
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd = []
+ cmd.append(self.module.get_bin_path('chpasswd', True))
+ cmd.append('-e')
+ cmd.append('-c')
+ (rc2, out2, err2) = self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
+ else:
+ (rc2, out2, err2) = (None, '', '')
+
+ if rc is not None:
+ return (rc, out + out2, err + err2)
+ else:
+ return (rc2, out + out2, err + err2)
+
+ def parse_shadow_file(self):
+ """Example AIX shadowfile data:
+ nobody:
+ password = *
+
+ operator1:
+ password = {ssha512}06$xxxxxxxxxxxx....
+ lastupdate = 1549558094
+
+ test1:
+ password = *
+ lastupdate = 1553695126
+
+ """
+
+ b_name = to_bytes(self.name)
+ b_passwd = b''
+ b_expires = b''
+ if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
+ with open(self.SHADOWFILE, 'rb') as bf:
+ b_lines = bf.readlines()
+
+ b_passwd_line = b''
+ b_expires_line = b''
+ try:
+ for index, b_line in enumerate(b_lines):
+ # Get password and lastupdate lines which come after the username
+ if b_line.startswith(b'%s:' % b_name):
+ b_passwd_line = b_lines[index + 1]
+ b_expires_line = b_lines[index + 2]
+ break
+
+ # Sanity check the lines because sometimes both are not present
+ if b' = ' in b_passwd_line:
+ b_passwd = b_passwd_line.split(b' = ', 1)[-1].strip()
+
+ if b' = ' in b_expires_line:
+ b_expires = b_expires_line.split(b' = ', 1)[-1].strip()
+
+ except IndexError:
+ self.module.fail_json(msg='Failed to parse shadow file %s' % self.SHADOWFILE)
+
+ passwd = to_native(b_passwd)
+ expires = to_native(b_expires) or -1
+ return passwd, expires
+
+
+class HPUX(User):
+ """
+    This is an HP-UX User manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'HP-UX'
+ distribution = None
+ SHADOWFILE = '/etc/shadow'
+
+ def create_user(self):
+ cmd = ['/usr/sam/lbin/useradd.sam']
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.password is not None:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.create_home:
+ cmd.append('-m')
+ else:
+ cmd.append('-M')
+
+ if self.system:
+ cmd.append('-r')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def remove_user(self):
+ cmd = ['/usr/sam/lbin/userdel.sam']
+ if self.force:
+ cmd.append('-F')
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ cmd = ['/usr/sam/lbin/usermod.sam']
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set(remove_existing=False)
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ new_groups = groups
+ if self.append:
+ new_groups = groups | set(current_groups)
+ cmd.append(','.join(new_groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ cmd.append('-d')
+ cmd.append(self.home)
+ if self.move_home:
+ cmd.append('-m')
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd.append('-F')
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ return (None, '', '')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+class BusyBox(User):
+ """
+ This is the BusyBox class for use on systems that have adduser, deluser,
+ and delgroup commands. It overrides the following methods:
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('adduser', True)]
+
+ cmd.append('-D')
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg='Group {0} does not exist'.format(self.group))
+ cmd.append('-G')
+ cmd.append(self.group)
+
+ if self.comment is not None:
+ cmd.append('-g')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-h')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if not self.create_home:
+ cmd.append('-H')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.umask is not None:
+ cmd.append('-K')
+ cmd.append('UMASK=' + self.umask)
+
+ if self.system:
+ cmd.append('-S')
+
+ cmd.append(self.name)
+
+ rc, out, err = self.execute_command(cmd)
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ if self.password is not None:
+ cmd = [self.module.get_bin_path('chpasswd', True)]
+ cmd.append('--encrypted')
+ data = '{name}:{password}'.format(name=self.name, password=self.password)
+ rc, out, err = self.execute_command(cmd, data=data)
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ # Add to additional groups
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ add_cmd_bin = self.module.get_bin_path('adduser', True)
+ for group in groups:
+ cmd = [add_cmd_bin, self.name, group]
+ rc, out, err = self.execute_command(cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ return rc, out, err
+
+ def remove_user(self):
+
+ cmd = [
+ self.module.get_bin_path('deluser', True),
+ self.name
+ ]
+
+ if self.remove:
+ cmd.append('--remove-home')
+
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ current_groups = self.user_group_membership()
+ groups = []
+ rc = None
+ out = ''
+ err = ''
+ info = self.user_info()
+ add_cmd_bin = self.module.get_bin_path('adduser', True)
+ remove_cmd_bin = self.module.get_bin_path('delgroup', True)
+
+ # Manage group membership
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ for g in groups:
+ if g in group_diff:
+ add_cmd = [add_cmd_bin, self.name, g]
+ rc, out, err = self.execute_command(add_cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ for g in group_diff:
+ if g not in groups and not self.append:
+ remove_cmd = [remove_cmd_bin, self.name, g]
+ rc, out, err = self.execute_command(remove_cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ # Manage password
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd = [self.module.get_bin_path('chpasswd', True)]
+ cmd.append('--encrypted')
+ data = '{name}:{password}'.format(name=self.name, password=self.password)
+ rc, out, err = self.execute_command(cmd, data=data)
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ return rc, out, err
+
+
+class Alpine(BusyBox):
+ """
+ This is the Alpine User manipulation class. It inherits the BusyBox class
+ behaviors such as using adduser and deluser commands.
+ """
+ platform = 'Linux'
+ distribution = 'Alpine'
+
+
+def main():
+ ssh_defaults = dict(
+ bits=0,
+ type='rsa',
+ passphrase=None,
+ comment='ansible-generated on %s' % socket.gethostname()
+ )
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True, aliases=['user']),
+ uid=dict(type='int'),
+ non_unique=dict(type='bool', default=False),
+ group=dict(type='str'),
+ groups=dict(type='list', elements='str'),
+ comment=dict(type='str'),
+ home=dict(type='path'),
+ shell=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ login_class=dict(type='str'),
+ password_expire_max=dict(type='int', no_log=False),
+ password_expire_min=dict(type='int', no_log=False),
+ # following options are specific to macOS
+ hidden=dict(type='bool'),
+ # following options are specific to selinux
+ seuser=dict(type='str'),
+ # following options are specific to userdel
+ force=dict(type='bool', default=False),
+ remove=dict(type='bool', default=False),
+ # following options are specific to useradd
+ create_home=dict(type='bool', default=True, aliases=['createhome']),
+ skeleton=dict(type='str'),
+ system=dict(type='bool', default=False),
+ # following options are specific to usermod
+ move_home=dict(type='bool', default=False),
+ append=dict(type='bool', default=False),
+ # following are specific to ssh key generation
+ generate_ssh_key=dict(type='bool'),
+ ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
+ ssh_key_type=dict(type='str', default=ssh_defaults['type']),
+ ssh_key_file=dict(type='path'),
+ ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
+ ssh_key_passphrase=dict(type='str', no_log=True),
+ update_password=dict(type='str', default='always', choices=['always', 'on_create'], no_log=False),
+ expires=dict(type='float'),
+ password_lock=dict(type='bool', no_log=False),
+ local=dict(type='bool'),
+ profile=dict(type='str'),
+ authorization=dict(type='str'),
+ role=dict(type='str'),
+ umask=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ user = User(module)
+ user.check_password_encrypted()
+
+ module.debug('User instantiated - platform %s' % user.platform)
+ if user.distribution:
+ module.debug('User instantiated - distribution %s' % user.distribution)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = user.name
+ result['state'] = user.state
+ if user.state == 'absent':
+ if user.user_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = user.remove_user()
+ if rc != 0:
+ module.fail_json(name=user.name, msg=err, rc=rc)
+ result['force'] = user.force
+ result['remove'] = user.remove
+ elif user.state == 'present':
+ if not user.user_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Check to see if the provided home path contains parent directories
+ # that do not exist.
+ path_needs_parents = False
+ if user.home and user.create_home:
+ parent = os.path.dirname(user.home)
+ if not os.path.isdir(parent):
+ path_needs_parents = True
+
+ (rc, out, err) = user.create_user()
+
+ # If the home path had parent directories that needed to be created,
+ # make sure file permissions are correct in the created home directory.
+ if path_needs_parents:
+ info = user.user_info()
+ if info is not False:
+ user.chown_homedir(info[2], info[3], user.home)
+
+ if module.check_mode:
+ result['system'] = user.name
+ else:
+ result['system'] = user.system
+ result['create_home'] = user.create_home
+ else:
+ # modify user (note: this function is check mode aware)
+ (rc, out, err) = user.modify_user()
+ result['append'] = user.append
+ result['move_home'] = user.move_home
+ if rc is not None and rc != 0:
+ module.fail_json(name=user.name, msg=err, rc=rc)
+ if user.password is not None:
+ result['password'] = 'NOT_LOGGING_PASSWORD'
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ if user.user_exists() and user.state == 'present':
+ info = user.user_info()
+ if info is False:
+ result['msg'] = "failed to look up user name: %s" % user.name
+ result['failed'] = True
+ result['uid'] = info[2]
+ result['group'] = info[3]
+ result['comment'] = info[4]
+ result['home'] = info[5]
+ result['shell'] = info[6]
+ if user.groups is not None:
+ result['groups'] = user.groups
+
+ # handle missing homedirs
+ info = user.user_info()
+ if user.home is None:
+ user.home = info[5]
+ if not os.path.exists(user.home) and user.create_home:
+ if not module.check_mode:
+ user.create_homedir(user.home)
+ user.chown_homedir(info[2], info[3], user.home)
+ result['changed'] = True
+
+ # deal with ssh key
+ if user.sshkeygen:
+ # generate ssh key (note: this function is check mode aware)
+ (rc, out, err) = user.ssh_key_gen()
+ if rc is not None and rc != 0:
+ module.fail_json(name=user.name, msg=err, rc=rc)
+ if rc == 0:
+ result['changed'] = True
+ (rc, out, err) = user.ssh_key_fingerprint()
+ if rc == 0:
+ result['ssh_fingerprint'] = out.strip()
+ else:
+ result['ssh_fingerprint'] = err.strip()
+ result['ssh_key_file'] = user.get_ssh_key_path()
+ result['ssh_public_key'] = user.get_ssh_public_key()
+
+ (rc, out, err) = user.set_password_expire()
+ if rc is None:
+ pass # target state reached, nothing to do
+ else:
+ if rc != 0:
+ module.fail_json(name=user.name, msg=err, rc=rc)
+ else:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/validate_argument_spec.py b/lib/ansible/modules/validate_argument_spec.py
new file mode 100644
index 0000000..e223c94
--- /dev/null
+++ b/lib/ansible/modules/validate_argument_spec.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: validate_argument_spec
+short_description: Validate role argument specs.
+description:
+ - This module validates role arguments with a defined argument specification.
+version_added: "2.11"
+options:
+ argument_spec:
+ description:
+      - A dictionary like the AnsibleModule argument_spec.
+ required: true
+ provided_arguments:
+ description:
+      - A dictionary of the arguments that will be validated according to argument_spec.
+author:
+ - Ansible Core Team
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.conn
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ become:
+ support: none
+ bypass_host_loop:
+ support: none
+ connection:
+ support: none
+ check_mode:
+ support: full
+ delegation:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ platforms: all
+'''
+
+EXAMPLES = r'''
+- name: verify vars needed for this task file are present when included
+ ansible.builtin.validate_argument_spec:
+ argument_spec: '{{required_data}}'
+ vars:
+ required_data:
+ # unlike spec file, just put the options in directly
+ stuff:
+ description: stuff
+ type: str
+ choices: ['who', 'knows', 'what']
+ default: what
+ but:
+ description: i guess we need one
+ type: str
+ required: true
+
+
+- name: verify vars needed for this task file are present when included, with spec from a spec file
+ ansible.builtin.validate_argument_spec:
+ argument_spec: "{{lookup('ansible.builtin.file', 'myargspec.yml')['specname']['options']}}"
+
+
+- name: verify vars needed for the next include (not from inside it), also with params I'll only define there
+  block:
+    - ansible.builtin.validate_argument_spec:
+        argument_spec: "{{ lookup('ansible.builtin.file', 'nakedoptions.yml') }}"
+        provided_arguments:
+          but: "that I can define on the include itself, like in its C(vars:) keyword"
+
+ - name: the include itself
+ vars:
+ stuff: knows
+ but: nobuts!
+'''
+
+RETURN = r'''
+argument_errors:
+ description: A list of arg validation errors.
+ returned: failure
+ type: list
+ elements: str
+ sample:
+ - "error message 1"
+ - "error message 2"
+
+argument_spec_data:
+ description: A dict of the data from the 'argument_spec' arg.
+ returned: failure
+ type: dict
+ sample:
+ some_arg:
+ type: "str"
+ some_other_arg:
+ type: "int"
+ required: true
+
+validate_args_context:
+  description: A dict of info about where validate_argument_spec was used
+ type: dict
+ returned: always
+ sample:
+ name: my_role
+ type: role
+ path: /home/user/roles/my_role/
+ argument_spec_name: main
+'''
diff --git a/lib/ansible/modules/wait_for.py b/lib/ansible/modules/wait_for.py
new file mode 100644
index 0000000..ada2e80
--- /dev/null
+++ b/lib/ansible/modules/wait_for.py
@@ -0,0 +1,689 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: wait_for
+short_description: Waits for a condition before continuing
+description:
+  - You can wait for a set amount of time C(timeout); this is the default if nothing is specified or just C(timeout) is specified.
+    This does not produce an error.
+  - Waiting for a port to become available is useful when services are not immediately available after their init scripts return,
+    which is true of certain Java application servers.
+ - It is also useful when starting guests with the M(community.libvirt.virt) module and needing to pause until they are ready.
+  - This module can also be used to wait for a string matching a regular expression to be present in a file.
+ - In Ansible 1.6 and later, this module can also be used to wait for a file to be available or
+ absent on the filesystem.
+ - In Ansible 1.8 and later, this module can also be used to wait for active connections to be closed before continuing, useful if a node
+ is being rotated out of a load balancer pool.
+ - For Windows targets, use the M(ansible.windows.win_wait_for) module instead.
+version_added: "0.7"
+options:
+ host:
+ description:
+ - A resolvable hostname or IP address to wait for.
+ type: str
+ default: 127.0.0.1
+ timeout:
+ description:
+      - Maximum number of seconds to wait for; when used with another condition it will force an error if the condition is not met in time.
+      - When used without other conditions it is equivalent to just sleeping.
+ type: int
+ default: 300
+ connect_timeout:
+ description:
+ - Maximum number of seconds to wait for a connection to happen before closing and retrying.
+ type: int
+ default: 5
+ delay:
+ description:
+ - Number of seconds to wait before starting to poll.
+ type: int
+ default: 0
+ port:
+ description:
+ - Port number to poll.
+ - C(path) and C(port) are mutually exclusive parameters.
+ type: int
+ active_connection_states:
+ description:
+ - The list of TCP connection states which are counted as active connections.
+ type: list
+ elements: str
+ default: [ ESTABLISHED, FIN_WAIT1, FIN_WAIT2, SYN_RECV, SYN_SENT, TIME_WAIT ]
+ version_added: "2.3"
+ state:
+ description:
+      - Either C(present), C(started), C(stopped), C(absent), or C(drained).
+      - When checking a port, C(started) will ensure the port is open, C(stopped) will check that it is closed, and C(drained) will wait until all active connections are closed.
+ - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing,
+ C(absent) will check that file is absent or removed.
+ type: str
+ choices: [ absent, drained, present, started, stopped ]
+ default: started
+ path:
+ description:
+ - Path to a file on the filesystem that must exist before continuing.
+ - C(path) and C(port) are mutually exclusive parameters.
+ type: path
+ version_added: "1.4"
+ search_regex:
+ description:
+ - Can be used to match a string in either a file or a socket connection.
+ - Defaults to a multiline regex.
+ type: str
+ version_added: "1.4"
+ exclude_hosts:
+ description:
+ - List of hosts or IPs to ignore when looking for active TCP connections for C(drained) state.
+ type: list
+ elements: str
+ version_added: "1.8"
+ sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ - Before Ansible 2.3 this was hardcoded to 1 second.
+ type: int
+ default: 1
+ version_added: "2.3"
+ msg:
+ description:
+ - This overrides the normal error message from a failure to meet the required conditions.
+ type: str
+ version_added: "2.4"
+extends_documentation_fragment: action_common_attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ platform:
+ platforms: posix
+notes:
+ - The ability to use search_regex with a port connection was added in Ansible 1.7.
+ - Prior to Ansible 2.4, testing for the absence of a directory or UNIX socket did not work correctly.
+ - Prior to Ansible 2.4, testing for the presence of a file did not work correctly if the remote user did not have read access to that file.
+ - Under some circumstances when using mandatory access control, a path may always be treated as being absent even if it exists, but
+ can't be modified or created by the remote user either.
+ - When waiting for a path, symbolic links will be followed. Many other modules that manipulate files do not follow symbolic links,
+ so operations on the path using other modules may not work exactly as expected.
+seealso:
+- module: ansible.builtin.wait_for_connection
+- module: ansible.windows.win_wait_for
+- module: community.windows.win_wait_for_process
+author:
+ - Jeroen Hoekx (@jhoekx)
+ - John Jarvis (@jarv)
+ - Andrii Radyk (@AnderEnder)
+'''
+
+EXAMPLES = r'''
+- name: Sleep for 300 seconds and continue with play
+ ansible.builtin.wait_for:
+ timeout: 300
+ delegate_to: localhost
+
+- name: Wait for port 8000 to become open on the host, don't start checking for 10 seconds
+ ansible.builtin.wait_for:
+ port: 8000
+ delay: 10
+
+- name: Waits for port 8000 of any IP to close active connections, don't start checking for 10 seconds
+ ansible.builtin.wait_for:
+ host: 0.0.0.0
+ port: 8000
+ delay: 10
+ state: drained
+
+- name: Wait for port 8000 of any IP to close active connections, ignoring connections for specified hosts
+ ansible.builtin.wait_for:
+ host: 0.0.0.0
+ port: 8000
+ state: drained
+ exclude_hosts: 10.2.1.2,10.2.1.3
+
+- name: Wait until the file /tmp/foo is present before continuing
+ ansible.builtin.wait_for:
+ path: /tmp/foo
+
+- name: Wait until the string "completed" is in the file /tmp/foo before continuing
+ ansible.builtin.wait_for:
+ path: /tmp/foo
+ search_regex: completed
+
+- name: Wait until regex pattern matches in the file /tmp/foo and print the matched group
+ ansible.builtin.wait_for:
+ path: /tmp/foo
+ search_regex: completed (?P<task>\w+)
+ register: waitfor
+- ansible.builtin.debug:
+ msg: Completed {{ waitfor['match_groupdict']['task'] }}
+
+- name: Wait until the lock file is removed
+ ansible.builtin.wait_for:
+ path: /var/lock/file.lock
+ state: absent
+
+- name: Wait until the process is finished and pid was destroyed
+ ansible.builtin.wait_for:
+ path: /proc/3466/status
+ state: absent
+
+- name: Output customized message when failed
+ ansible.builtin.wait_for:
+ path: /tmp/foo
+ state: present
+ msg: Timeout to find file /tmp/foo
+
+# Do not assume the inventory_hostname is resolvable and delay 10 seconds at start
+- name: Wait 300 seconds for port 22 to become open and contain "OpenSSH"
+ ansible.builtin.wait_for:
+ port: 22
+ host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
+ search_regex: OpenSSH
+ delay: 10
+ connection: local
+
+# Same as above but you normally have ansible_connection set in inventory, which overrides 'connection'
+- name: Wait 300 seconds for port 22 to become open and contain "OpenSSH"
+ ansible.builtin.wait_for:
+ port: 22
+ host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
+ search_regex: OpenSSH
+ delay: 10
+ vars:
+ ansible_connection: local
+'''
+
+RETURN = r'''
+elapsed:
+ description: The number of seconds that elapsed while waiting
+ returned: always
+ type: int
+ sample: 23
+match_groups:
+ description: Tuple containing all the subgroups of the match as returned by U(https://docs.python.org/3/library/re.html#re.MatchObject.groups)
+ returned: always
+ type: list
+ sample: ['match 1', 'match 2']
+match_groupdict:
+ description: Dictionary containing all the named subgroups of the match, keyed by the subgroup name,
+ as returned by U(https://docs.python.org/3/library/re.html#re.MatchObject.groupdict)
+ returned: always
+ type: dict
+ sample:
+ {
+ 'group': 'match'
+ }
+'''
+
+import binascii
+import contextlib
+import datetime
+import errno
+import math
+import mmap
+import os
+import re
+import select
+import socket
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.sys_info import get_platform_subclass
+from ansible.module_utils._text import to_bytes
+
+
+HAS_PSUTIL = False
+PSUTIL_IMP_ERR = None
+try:
+ import psutil
+ HAS_PSUTIL = True
+ # just because we can import it on Linux doesn't mean we will use it
+except ImportError:
+ PSUTIL_IMP_ERR = traceback.format_exc()
+
+
+class TCPConnectionInfo(object):
+ """
+ This is a generic TCP Connection Info strategy class that relies
+ on the psutil module, which is not ideal for targets, but necessary
+ for cross platform support.
+
+ A subclass may wish to override some or all of these methods.
+ - _get_exclude_ips()
+ - get_active_connections()
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+ platform = 'Generic'
+ distribution = None
+
+ match_all_ips = {
+ socket.AF_INET: '0.0.0.0',
+ socket.AF_INET6: '::',
+ }
+ ipv4_mapped_ipv6_address = {
+ 'prefix': '::ffff',
+ 'match_all': '::ffff:0.0.0.0'
+ }
+
+ def __new__(cls, *args, **kwargs):
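+        # Dispatch to the platform-specific subclass (e.g. LinuxTCPConnectionInfo)
+        # chosen by get_platform_subclass() from the detected platform/distribution.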
+ new_cls = get_platform_subclass(TCPConnectionInfo)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.ips = _convert_host_to_ip(module.params['host'])
+ self.port = int(self.module.params['port'])
+ self.exclude_ips = self._get_exclude_ips()
+ if not HAS_PSUTIL:
+ module.fail_json(msg=missing_required_lib('psutil'), exception=PSUTIL_IMP_ERR)
+
+ def _get_exclude_ips(self):
+ exclude_hosts = self.module.params['exclude_hosts']
+ exclude_ips = []
+ if exclude_hosts is not None:
+ for host in exclude_hosts:
+ exclude_ips.extend(_convert_host_to_ip(host))
+ return exclude_ips
+
+ def get_active_connections_count(self):
+ active_connections = 0
+ for p in psutil.process_iter():
+ try:
+ if hasattr(p, 'get_connections'):
+ connections = p.get_connections(kind='inet')
+ else:
+ connections = p.connections(kind='inet')
+ except psutil.Error:
+ # Process is Zombie or other error state
+ continue
+ for conn in connections:
+ if conn.status not in self.module.params['active_connection_states']:
+ continue
+ if hasattr(conn, 'local_address'):
+ (local_ip, local_port) = conn.local_address
+ else:
+ (local_ip, local_port) = conn.laddr
+ if self.port != local_port:
+ continue
+ if hasattr(conn, 'remote_address'):
+ (remote_ip, remote_port) = conn.remote_address
+ else:
+ (remote_ip, remote_port) = conn.raddr
+ if (conn.family, remote_ip) in self.exclude_ips:
+ continue
+ if any((
+ (conn.family, local_ip) in self.ips,
+ (conn.family, self.match_all_ips[conn.family]) in self.ips,
+ local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
+ (conn.family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
+ )):
+ active_connections += 1
+ return active_connections
+
+
+# ===========================================
+# Subclass: Linux
+
+class LinuxTCPConnectionInfo(TCPConnectionInfo):
+ """
+ This is a TCP Connection Info evaluation strategy class
+ that utilizes information from Linux's procfs. While less universal,
+ does allow Linux targets to not require an additional library.
+ """
+ platform = 'Linux'
+ distribution = None
+
+ source_file = {
+ socket.AF_INET: '/proc/net/tcp',
+ socket.AF_INET6: '/proc/net/tcp6'
+ }
+ match_all_ips = {
+ socket.AF_INET: '00000000',
+ socket.AF_INET6: '00000000000000000000000000000000',
+ }
+ ipv4_mapped_ipv6_address = {
+ 'prefix': '0000000000000000FFFF0000',
+ 'match_all': '0000000000000000FFFF000000000000'
+ }
+ local_address_field = 1
+ remote_address_field = 2
+ connection_state_field = 3
+
+ def __init__(self, module):
+ self.module = module
+ self.ips = _convert_host_to_hex(module.params['host'])
+ self.port = "%0.4X" % int(module.params['port'])
+ self.exclude_ips = self._get_exclude_ips()
+
+ def _get_exclude_ips(self):
+ exclude_hosts = self.module.params['exclude_hosts']
+ exclude_ips = []
+ if exclude_hosts is not None:
+ for host in exclude_hosts:
+ exclude_ips.extend(_convert_host_to_hex(host))
+ return exclude_ips
+
+ def get_active_connections_count(self):
+ active_connections = 0
+ for family in self.source_file.keys():
+ if not os.path.isfile(self.source_file[family]):
+ continue
+ try:
+ f = open(self.source_file[family])
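+                # Rows in /proc/net/tcp* look like (illustrative, truncated):
+                #   0: 0100007F:1F90 00000000:0000 0A 00000000:00000000 ...
+                # i.e. sl, local_address, rem_address, st, ...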
+ for tcp_connection in f.readlines():
+ tcp_connection = tcp_connection.strip().split()
+ if tcp_connection[self.local_address_field] == 'local_address':
+ continue
+ if (tcp_connection[self.connection_state_field] not in
+ [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]):
+ continue
+ (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
+ if self.port != local_port:
+ continue
+ (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
+ if (family, remote_ip) in self.exclude_ips:
+ continue
+ if any((
+ (family, local_ip) in self.ips,
+ (family, self.match_all_ips[family]) in self.ips,
+ local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
+ (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
+ )):
+ active_connections += 1
+            except IOError:
+                # failed to read the proc file; treat as no matching connections
+                pass
+ finally:
+ f.close()
+
+ return active_connections
+
+
+def _convert_host_to_ip(host):
+ """
+    Perform forward DNS resolution on host; passing an IP address returns the same IP
+
+ Args:
+ host: String with either hostname, IPv4, or IPv6 address
+
+ Returns:
+ List of tuples containing address family and IP
+ """
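+    # For example (illustrative): _convert_host_to_ip('127.0.0.1') returns
+    #   [(AF_INET, '127.0.0.1'), (AF_INET6, '::ffff:127.0.0.1')]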
+ addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)
+ ips = []
+ for family, socktype, proto, canonname, sockaddr in addrinfo:
+ ip = sockaddr[0]
+ ips.append((family, ip))
+ if family == socket.AF_INET:
+ ips.append((socket.AF_INET6, "::ffff:" + ip))
+ return ips
+
+
+def _convert_host_to_hex(host):
+ """
+ Convert the provided host to the format in /proc/net/tcp*
+
+    /proc/net/tcp stores IPv4 addresses as little-endian four-byte hex;
+    /proc/net/tcp6 stores IPv6 addresses as little-endian hex per 4-byte word.
+
+ Args:
+ host: String with either hostname, IPv4, or IPv6 address
+
+ Returns:
+ List of tuples containing address family and the
+ little-endian converted host
+ """
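+    # For example, on a little-endian host (illustrative):
+    #   _convert_host_to_hex('127.0.0.1') ->
+    #   [(AF_INET, '0100007F'),
+    #    (AF_INET6, '0000000000000000FFFF00000100007F')]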
+ ips = []
+ if host is not None:
+ for family, ip in _convert_host_to_ip(host):
+ hexip_nf = binascii.b2a_hex(socket.inet_pton(family, ip))
+ hexip_hf = ""
+ for i in range(0, len(hexip_nf), 8):
+ ipgroup_nf = hexip_nf[i:i + 8]
+ ipgroup_hf = socket.ntohl(int(ipgroup_nf, base=16))
+ hexip_hf = "%s%08X" % (hexip_hf, ipgroup_hf)
+ ips.append((family, hexip_hf))
+ return ips
+
+
+def _timedelta_total_seconds(timedelta):
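+    # Equivalent to timedelta.total_seconds(); kept since total_seconds()
+    # requires Python 2.7+ and modules historically ran on older interpreters.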
+ return (
+ timedelta.microseconds + 0.0 +
+ (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
+
+
+def get_connection_state_id(state):
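+    # Numeric codes used in the `st` column of /proc/net/tcp*
+    # (cf. TCP_ESTABLISHED and friends in the Linux kernel's tcp_states.h).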
+ connection_state_id = {
+ 'ESTABLISHED': '01',
+ 'SYN_SENT': '02',
+ 'SYN_RECV': '03',
+ 'FIN_WAIT1': '04',
+ 'FIN_WAIT2': '05',
+ 'TIME_WAIT': '06',
+ }
+ return connection_state_id[state]
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ timeout=dict(type='int', default=300),
+ connect_timeout=dict(type='int', default=5),
+ delay=dict(type='int', default=0),
+ port=dict(type='int'),
+ active_connection_states=dict(type='list', elements='str', default=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT']),
+ path=dict(type='path'),
+ search_regex=dict(type='str'),
+ state=dict(type='str', default='started', choices=['absent', 'drained', 'present', 'started', 'stopped']),
+ exclude_hosts=dict(type='list', elements='str'),
+ sleep=dict(type='int', default=1),
+ msg=dict(type='str'),
+ ),
+ )
+
+ host = module.params['host']
+ timeout = module.params['timeout']
+ connect_timeout = module.params['connect_timeout']
+ delay = module.params['delay']
+ port = module.params['port']
+ state = module.params['state']
+
+ path = module.params['path']
+ b_path = to_bytes(path, errors='surrogate_or_strict', nonstring='passthru')
+
+ search_regex = module.params['search_regex']
+ b_search_regex = to_bytes(search_regex, errors='surrogate_or_strict', nonstring='passthru')
+
+ msg = module.params['msg']
+
+ if search_regex is not None:
+ try:
+ b_compiled_search_re = re.compile(b_search_regex, re.MULTILINE)
+ except re.error as e:
+ module.fail_json(msg="Invalid regular expression: %s" % e)
+ else:
+ b_compiled_search_re = None
+
+ match_groupdict = {}
+ match_groups = ()
+
+ if port and path:
+        module.fail_json(msg="port and path parameters can not both be passed to wait_for", elapsed=0)
+ if path and state == 'stopped':
+ module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module", elapsed=0)
+ if path and state == 'drained':
+ module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module", elapsed=0)
+ if module.params['exclude_hosts'] is not None and state != 'drained':
+        module.fail_json(msg="exclude_hosts should only be used with state=drained", elapsed=0)
+ for _connection_state in module.params['active_connection_states']:
+ try:
+ get_connection_state_id(_connection_state)
+ except Exception:
+ module.fail_json(msg="unknown active_connection_state (%s) defined" % _connection_state, elapsed=0)
+
+ start = datetime.datetime.utcnow()
+
+ if delay:
+ time.sleep(delay)
+
+ if not port and not path and state != 'drained':
+ time.sleep(timeout)
+ elif state in ['absent', 'stopped']:
+ # first wait for the stop condition
+ end = start + datetime.timedelta(seconds=timeout)
+
+ while datetime.datetime.utcnow() < end:
+ if path:
+ try:
+ if not os.access(b_path, os.F_OK):
+ break
+ except IOError:
+ break
+ elif port:
+ try:
+ s = socket.create_connection((host, port), connect_timeout)
+ s.shutdown(socket.SHUT_RDWR)
+ s.close()
+ except Exception:
+ break
+ # Conditions not yet met, wait and try again
+ time.sleep(module.params['sleep'])
+ else:
+ elapsed = datetime.datetime.utcnow() - start
+ if port:
+ module.fail_json(msg=msg or "Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
+ elif path:
+ module.fail_json(msg=msg or "Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds)
+
+ elif state in ['started', 'present']:
+ # wait for start condition
+ end = start + datetime.timedelta(seconds=timeout)
+ while datetime.datetime.utcnow() < end:
+ if path:
+ try:
+ os.stat(b_path)
+ except OSError as e:
+ # If anything except file not present, throw an error
+                    if e.errno != errno.ENOENT:
+ elapsed = datetime.datetime.utcnow() - start
+ module.fail_json(msg=msg or "Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
+ # file doesn't exist yet, so continue
+ else:
+ # File exists. Are there additional things to check?
+ if not b_compiled_search_re:
+ # nope, succeed!
+ break
+ try:
+ with open(b_path, 'rb') as f:
+ with contextlib.closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as mm:
+ search = b_compiled_search_re.search(mm)
+ if search:
+ if search.groupdict():
+ match_groupdict = search.groupdict()
+ if search.groups():
+ match_groups = search.groups()
+
+ break
+ except IOError:
+ pass
+ elif port:
+ alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))
+ try:
+ s = socket.create_connection((host, port), min(connect_timeout, alt_connect_timeout))
+ except Exception:
+ # Failed to connect by connect_timeout. wait and try again
+ pass
+ else:
+ # Connected -- are there additional conditions?
+ if b_compiled_search_re:
+ b_data = b''
+ matched = False
+ while datetime.datetime.utcnow() < end:
+ max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))
+ readable = select.select([s], [], [], max_timeout)[0]
+ if not readable:
+ # No new data. Probably means our timeout
+ # expired
+ continue
+ response = s.recv(1024)
+ if not response:
+ # Server shutdown
+ break
+ b_data += response
+ if b_compiled_search_re.search(b_data):
+ matched = True
+ break
+
+ # Shutdown the client socket
+ try:
+ s.shutdown(socket.SHUT_RDWR)
+ except socket.error as e:
+ if e.errno != errno.ENOTCONN:
+ raise
+ # else, the server broke the connection on its end, assume it's not ready
+ else:
+ s.close()
+ if matched:
+ # Found our string, success!
+ break
+ else:
+ # Connection established, success!
+ try:
+ s.shutdown(socket.SHUT_RDWR)
+ except socket.error as e:
+ if e.errno != errno.ENOTCONN:
+ raise
+ # else, the server broke the connection on its end, assume it's not ready
+ else:
+ s.close()
+ break
+
+ # Conditions not yet met, wait and try again
+ time.sleep(module.params['sleep'])
+
+ else: # while-else
+ # Timeout expired
+ elapsed = datetime.datetime.utcnow() - start
+ if port:
+ if search_regex:
+ module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds)
+ else:
+ module.fail_json(msg=msg or "Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds)
+ elif path:
+ if search_regex:
+ module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds)
+ else:
+ module.fail_json(msg=msg or "Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds)
+
+ elif state == 'drained':
+ # wait until all active connections are gone
+ end = start + datetime.timedelta(seconds=timeout)
+ tcpconns = TCPConnectionInfo(module)
+ while datetime.datetime.utcnow() < end:
+ if tcpconns.get_active_connections_count() == 0:
+ break
+
+ # Conditions not yet met, wait and try again
+ time.sleep(module.params['sleep'])
+ else:
+ elapsed = datetime.datetime.utcnow() - start
+ module.fail_json(msg=msg or "Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds)
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(state=state, port=port, search_regex=search_regex, match_groups=match_groups, match_groupdict=match_groupdict, path=path,
+ elapsed=elapsed.seconds)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/wait_for_connection.py b/lib/ansible/modules/wait_for_connection.py
new file mode 100644
index 0000000..f0eccb6
--- /dev/null
+++ b/lib/ansible/modules/wait_for_connection.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: wait_for_connection
+short_description: Waits until remote system is reachable/usable
+description:
+- Waits for a total of C(timeout) seconds.
+- Retries the transport connection after a timeout of C(connect_timeout).
+- Tests the transport connection every C(sleep) seconds.
+- This module makes use of internal Ansible transport (and configuration) and the ping/win_ping modules to guarantee correct end-to-end functioning.
+- This module is also supported for Windows targets.
+version_added: '2.3'
+options:
+ connect_timeout:
+ description:
+ - Maximum number of seconds to wait for a connection to happen before closing and retrying.
+ type: int
+ default: 5
+ delay:
+ description:
+ - Number of seconds to wait before starting to poll.
+ type: int
+ default: 0
+ sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ type: int
+ default: 1
+ timeout:
+ description:
+ - Maximum number of seconds to wait for.
+ type: int
+ default: 600
+extends_documentation_fragment:
+ - action_common_attributes
+ - action_common_attributes.flow
+attributes:
+ action:
+ support: full
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+ platform:
+ details: As long as there is a connection plugin
+ platforms: all
+seealso:
+- module: ansible.builtin.wait_for
+- module: ansible.windows.win_wait_for
+- module: community.windows.win_wait_for_process
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Wait 600 seconds for target connection to become reachable/usable
+ ansible.builtin.wait_for_connection:
+
+- name: Wait 300 seconds, but only start checking after 60 seconds
+ ansible.builtin.wait_for_connection:
+ delay: 60
+ timeout: 300
+
+# Wake desktops, wait for them to become ready and continue playbook
+- hosts: all
+ gather_facts: no
+ tasks:
+ - name: Send magic Wake-On-Lan packet to turn on individual systems
+ community.general.wakeonlan:
+ mac: '{{ mac }}'
+ broadcast: 192.168.0.255
+ delegate_to: localhost
+
+ - name: Wait for system to become reachable
+ ansible.builtin.wait_for_connection:
+
+ - name: Gather facts for first time
+ ansible.builtin.setup:
+
+# Build a new VM, wait for it to become ready and continue playbook
+- hosts: all
+ gather_facts: no
+ tasks:
+ - name: Clone new VM, if missing
+ community.vmware.vmware_guest:
+ hostname: '{{ vcenter_ipaddress }}'
+ name: '{{ inventory_hostname_short }}'
+ template: Windows 2012R2
+ customization:
+ hostname: '{{ vm_shortname }}'
+ runonce:
+ - powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
+ delegate_to: localhost
+
+ - name: Wait for system to become reachable over WinRM
+ ansible.builtin.wait_for_connection:
+ timeout: 900
+
+ - name: Gather facts for first time
+ ansible.builtin.setup:
+'''
+
+RETURN = r'''
+elapsed:
+ description: The number of seconds that elapsed waiting for the connection to appear.
+ returned: always
+ type: float
+ sample: 23.1
+'''
diff --git a/lib/ansible/modules/yum.py b/lib/ansible/modules/yum.py
new file mode 100644
index 0000000..040ee27
--- /dev/null
+++ b/lib/ansible/modules/yum.py
@@ -0,0 +1,1818 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+# Copyright: (c) 2014, Epic Games, Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: yum
+version_added: historical
+short_description: Manages packages with the I(yum) package manager
+description:
+  - Installs, upgrades, downgrades, removes, and lists packages and groups with the I(yum) package manager.
+  - This module only works on Python 2. If you require Python 3 support see the M(ansible.builtin.dnf) module.
+options:
+ use_backend:
+ description:
+      - This module supports C(yum) (as it always has), which is known as C(yum3)/C(YUM3)/C(yum-deprecated) by
+        upstream yum developers. As of Ansible 2.7+, this module also supports C(YUM4), the "new yum",
+        which has a C(dnf) backend.
+ - By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
+ default: "auto"
+ choices: [ auto, yum, yum4, dnf ]
+ type: str
+ version_added: "2.7"
+ name:
+ description:
+ - A package name or package specifier with version, like C(name-1.0).
+      - Comparison operators for package versions are valid here: C(>), C(<), C(>=), C(<=). Example - C(name>=1.0).
+ - If a previous version is specified, the task also needs to turn C(allow_downgrade) on.
+ See the C(allow_downgrade) documentation for caveats with downgrading packages.
+ - When using state=latest, this can be C('*') which means run C(yum -y update).
+      - You can also pass a URL or a local path to an RPM file (using state=present).
+        To operate on several packages this can accept a comma-separated string of packages or (as of 2.0) a list of packages.
+ aliases: [ pkg ]
+ type: list
+ elements: str
+ exclude:
+ description:
+      - Package name(s) to exclude when state=present or state=latest.
+ type: list
+ elements: str
+ version_added: "2.0"
+ list:
+ description:
+      - "Package name to run the equivalent of C(yum list --show-duplicates <package>) against. In addition to listing packages,
+        you can also list the following: C(installed), C(updates), C(available) and C(repos)."
+ - This parameter is mutually exclusive with I(name).
+ type: str
+ state:
+ description:
+      - Whether to install (C(present), C(installed), C(latest)) or remove (C(absent), C(removed)) a package.
+ - C(present) and C(installed) will simply ensure that a desired package is installed.
+ - C(latest) will update the specified package if it's not of the latest available version.
+ - C(absent) and C(removed) will remove the specified package.
+      - Default is C(None); in effect the default action is C(present) unless the C(autoremove) option is
+        enabled for this module, in which case C(absent) is inferred.
+ type: str
+ choices: [ absent, installed, latest, present, removed ]
+ enablerepo:
+ description:
+ - I(Repoid) of repositories to enable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a C(",").
+      - As of Ansible 2.7, this can alternatively be a list instead of a
+        C(",")-separated string.
+ type: list
+ elements: str
+ version_added: "0.9"
+ disablerepo:
+ description:
+ - I(Repoid) of repositories to disable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a C(",").
+      - As of Ansible 2.7, this can alternatively be a list instead of a
+        C(",")-separated string.
+ type: list
+ elements: str
+ version_added: "0.9"
+ conf_file:
+ description:
+ - The remote yum configuration file to use for the transaction.
+ type: str
+ version_added: "0.6"
+ disable_gpg_check:
+ description:
+ - Whether to disable the GPG checking of signatures of packages being
+ installed. Has an effect only if state is I(present) or I(latest).
+ type: bool
+ default: "no"
+ version_added: "1.2"
+ skip_broken:
+ description:
+ - Skip all unavailable packages or packages with broken dependencies
+ without raising an error. Equivalent to passing the --skip-broken option.
+ type: bool
+ default: "no"
+ version_added: "2.3"
+ update_cache:
+ description:
+ - Force yum to check if cache is out of date and redownload if needed.
+ Has an effect only if state is I(present) or I(latest).
+ type: bool
+ default: "no"
+ aliases: [ expire-cache ]
+ version_added: "1.9"
+ validate_certs:
+ description:
+      - This only applies if using an https URL as the source of the rpm, e.g. for localinstall. If set to C(false), the SSL certificates will not be validated.
+      - This should only be set to C(false) on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
+ - Prior to 2.1 the code worked as if this was set to C(true).
+ type: bool
+ default: "yes"
+ version_added: "2.1"
+ sslverify:
+ description:
+ - Disables SSL validation of the repository server for this transaction.
+ - This should be set to C(false) if one of the configured repositories is using an untrusted or self-signed certificate.
+ type: bool
+ default: "yes"
+ version_added: "2.13"
+ update_only:
+ description:
+ - When using latest, only update installed packages. Do not install packages.
+      - Has an effect only if state is I(latest).
+ default: "no"
+ type: bool
+ version_added: "2.5"
+
+ installroot:
+ description:
+ - Specifies an alternative installroot, relative to which all packages
+ will be installed.
+ default: "/"
+ type: str
+ version_added: "2.3"
+ security:
+ description:
+      - If set to C(true) and C(state=latest), only installs updates that have been marked security related.
+ type: bool
+ default: "no"
+ version_added: "2.4"
+ bugfix:
+ description:
+      - If set to C(true) and C(state=latest), only installs updates that have been marked bugfix related.
+ default: "no"
+ type: bool
+ version_added: "2.6"
+ allow_downgrade:
+ description:
+      - Specify whether the named package and version is allowed to downgrade
+        a higher version of that package that may already be installed.
+ Note that setting allow_downgrade=True can make this module
+ behave in a non-idempotent way. The task could end up with a set
+ of packages that does not match the complete list of specified
+ packages to install (because dependencies between the downgraded
+ package and others can cause changes to the packages which were
+ in the earlier transaction).
+ type: bool
+ default: "no"
+ version_added: "2.4"
+ enable_plugin:
+ description:
+ - I(Plugin) name to enable for the install/update operation.
+ The enabled plugin will not persist beyond the transaction.
+ type: list
+ elements: str
+ version_added: "2.5"
+ disable_plugin:
+ description:
+ - I(Plugin) name to disable for the install/update operation.
+ The disabled plugins will not persist beyond the transaction.
+ type: list
+ elements: str
+ version_added: "2.5"
+ releasever:
+ description:
+ - Specifies an alternative release from which all packages will be
+ installed.
+ type: str
+ version_added: "2.7"
+ autoremove:
+ description:
+ - If C(true), removes all "leaf" packages from the system that were originally
+ installed as dependencies of user-installed packages but which are no longer
+        required by any such package. Should be used alone or when state is I(absent).
+ - "NOTE: This feature requires yum >= 3.4.3 (RHEL/CentOS 7+)"
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ disable_excludes:
+ description:
+ - Disable the excludes defined in YUM config files.
+ - If set to C(all), disables all excludes.
+      - If set to C(main), disables excludes defined in [main] in yum.conf.
+      - If set to C(repoid), disables excludes defined for the given repo id.
+ type: str
+ version_added: "2.7"
+ download_only:
+ description:
+ - Only download the packages, do not install them.
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ lock_timeout:
+ description:
+      - Amount of time (in seconds) to wait for the yum lockfile to be freed.
+ required: false
+ default: 30
+ type: int
+ version_added: "2.8"
+ install_weak_deps:
+ description:
+ - Will also install all packages linked by a weak dependency relation.
+ - "NOTE: This feature requires yum >= 4 (RHEL/CentOS 8+)"
+ type: bool
+ default: "yes"
+ version_added: "2.8"
+ download_dir:
+ description:
+ - Specifies an alternate directory to store packages.
+ - Has an effect only if I(download_only) is specified.
+ type: str
+ version_added: "2.8"
+ install_repoquery:
+ description:
+ - If repoquery is not available, install yum-utils. If the system is
+ registered to RHN or an RHN Satellite, repoquery allows for querying
+ all channels assigned to the system. It is also required to use the
+ 'list' parameter.
+      - "NOTE: This will run and be logged as a separate yum transaction which
+        takes place before any other installation or removal."
+ - "NOTE: This will use the system's default enabled repositories without
+ regard for disablerepo/enablerepo given to the module."
+ required: false
+ version_added: "1.5"
+ default: "yes"
+ type: bool
+ cacheonly:
+ description:
+ - Tells yum to run entirely from system cache; does not download or update metadata.
+ default: "no"
+ type: bool
+ version_added: "2.12"
+extends_documentation_fragment:
+- action_common_attributes
+- action_common_attributes.flow
+attributes:
+ action:
+ details: In the case of yum, it has 2 action plugins that use it under the hood, M(ansible.builtin.yum) and M(ansible.builtin.package).
+ support: partial
+ async:
+ support: none
+ bypass_host_loop:
+ support: none
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: rhel
+notes:
+  - When used with a C(loop:) each package will be processed individually;
+    it is much more efficient to pass the list directly to the I(name) option.
+ - In versions prior to 1.9.2 this module installed and removed each package
+ given to the yum module separately. This caused problems when packages
+ specified by filename or url had to be installed or removed together. In
+ 1.9.2 this was fixed so that packages are installed in one yum
+ transaction. However, if one of the packages adds a new yum repository
+ that the other packages come from (such as epel-release) then that package
+ needs to be installed in a separate task. This mimics yum's command line
+ behaviour.
+ - 'Yum itself has two types of groups. "Package groups" are specified in the
+ rpm itself while "environment groups" are specified in a separate file
+ (usually by the distribution). Unfortunately, this division becomes
+ apparent to ansible users because ansible needs to operate on the group
+ of packages in a single transaction and yum requires groups to be specified
+ in different ways when used in that way. Package groups are specified as
+ "@development-tools" and environment groups are "@^gnome-desktop-environment".
+ Use the "yum group list hidden ids" command to see which category of group the group
+ you want to install falls into.'
+  - 'The yum module does not support clearing the yum cache in an idempotent way, so it
+    was decided not to implement it; the only method is to use the command module and call the yum
+    command directly, namely "command: yum clean all"
+    https://github.com/ansible/ansible/pull/31450#issuecomment-352889579'
+# informational: requirements for nodes
+requirements:
+- yum
+author:
+ - Ansible Core Team
+ - Seth Vidal (@skvidal)
+ - Eduard Snesarev (@verm666)
+ - Berend De Schouwer (@berenddeschouwer)
+ - Abhijeet Kasurde (@Akasurde)
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = '''
+- name: Install the latest version of Apache
+ ansible.builtin.yum:
+ name: httpd
+ state: latest
+
+- name: Install Apache >= 2.4
+ ansible.builtin.yum:
+ name: httpd>=2.4
+ state: present
+
+- name: Install a list of packages (suitable replacement for 2.11 loop deprecation warning)
+ ansible.builtin.yum:
+ name:
+ - nginx
+ - postgresql
+ - postgresql-server
+ state: present
+
+- name: Install a list of packages with a list variable
+ ansible.builtin.yum:
+ name: "{{ packages }}"
+ vars:
+ packages:
+ - httpd
+ - httpd-tools
+
+- name: Remove the Apache package
+ ansible.builtin.yum:
+ name: httpd
+ state: absent
+
+- name: Install the latest version of Apache from the testing repo
+ ansible.builtin.yum:
+ name: httpd
+ enablerepo: testing
+ state: present
+
+- name: Install one specific version of Apache
+ ansible.builtin.yum:
+ name: httpd-2.2.29-1.4.amzn1
+ state: present
+
+- name: Upgrade all packages
+ ansible.builtin.yum:
+ name: '*'
+ state: latest
+
+- name: Upgrade all packages, excluding kernel & foo related packages
+ ansible.builtin.yum:
+ name: '*'
+ state: latest
+ exclude: kernel*,foo*
+
+- name: Install the nginx rpm from a remote repo
+ ansible.builtin.yum:
+ name: http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
+
+- name: Install nginx rpm from a local file
+ ansible.builtin.yum:
+ name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
+
+- name: Install the 'Development tools' package group
+ ansible.builtin.yum:
+ name: "@Development tools"
+ state: present
+
+- name: Install the 'Gnome desktop' environment group
+ ansible.builtin.yum:
+ name: "@^gnome-desktop-environment"
+ state: present
+
+- name: List ansible packages and register result to print with debug later
+ ansible.builtin.yum:
+ list: ansible
+ register: result
+
+- name: Install package with multiple repos enabled
+ ansible.builtin.yum:
+ name: sos
+ enablerepo: "epel,ol7_latest"
+
+- name: Install package with multiple repos disabled
+ ansible.builtin.yum:
+ name: sos
+ disablerepo: "epel,ol7_latest"
+
+- name: Download the nginx package but do not install it
+ ansible.builtin.yum:
+ name:
+ - nginx
+ state: latest
+ download_only: true
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.locale import get_best_parsable_locale
+from ansible.module_utils.common.respawn import has_respawned, respawn_module
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
+
+import errno
+import os
+import re
+import sys
+import tempfile
+
+try:
+ import rpm
+ HAS_RPM_PYTHON = True
+except ImportError:
+ HAS_RPM_PYTHON = False
+
+try:
+ import yum
+ HAS_YUM_PYTHON = True
+except ImportError:
+ HAS_YUM_PYTHON = False
+
+try:
+ from yum.misc import find_unfinished_transactions, find_ts_remaining
+ from rpmUtils.miscutils import splitFilename, compareEVR
+ transaction_helpers = True
+except ImportError:
+ transaction_helpers = False
+
+from contextlib import contextmanager
+from ansible.module_utils.urls import fetch_file
+
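+# default rpm/repoquery query format: epoch:name-version-release.arch (ENVRA),
+# the canonical package identifier this module compares against throughout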
+def_qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}"
+rpmbin = None
+
+
+class YumModule(YumDnf):
+ """
+ Yum Ansible module back-end implementation
+ """
+
+ def __init__(self, module):
+
+ # state=installed name=pkgspec
+ # state=removed name=pkgspec
+ # state=latest name=pkgspec
+ #
+ # informational commands:
+ # list=installed
+ # list=updates
+ # list=available
+ # list=repos
+ # list=pkgspec
+
+ # This populates instance vars for all argument spec params
+ super(YumModule, self).__init__(module)
+
+ self.pkg_mgr_name = "yum"
+ self.lockfile = '/var/run/yum.pid'
+ self._yum_base = None
+
+ def _enablerepos_with_error_checking(self):
+ # NOTE: This seems unintuitive, but it mirrors yum's CLI behavior
+ if len(self.enablerepo) == 1:
+ try:
+ self.yum_base.repos.enableRepo(self.enablerepo[0])
+ except yum.Errors.YumBaseError as e:
+ if u'repository not found' in to_text(e):
+ self.module.fail_json(msg="Repository %s not found." % self.enablerepo[0])
+ else:
+ raise e
+ else:
+ for rid in self.enablerepo:
+ try:
+ self.yum_base.repos.enableRepo(rid)
+ except yum.Errors.YumBaseError as e:
+ if u'repository not found' in to_text(e):
+ self.module.warn("Repository %s not found." % rid)
+ else:
+ raise e
+
+ def is_lockfile_pid_valid(self):
+ try:
+ try:
+ with open(self.lockfile, 'r') as f:
+ oldpid = int(f.readline())
+ except ValueError:
+ # invalid data
+ os.unlink(self.lockfile)
+ return False
+
+ if oldpid == os.getpid():
+ # that's us?
+ os.unlink(self.lockfile)
+ return False
+
+ try:
+ with open("/proc/%d/stat" % oldpid, 'r') as f:
+ stat = f.readline()
+
+ if stat.split()[2] == 'Z':
+ # Zombie
+ os.unlink(self.lockfile)
+ return False
+ except IOError:
+ # either /proc is not mounted or the process is already dead
+ try:
+ # check the state of the process
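+                    # (signal 0 performs error checking only; no signal is sent)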
+ os.kill(oldpid, 0)
+ except OSError as e:
+ if e.errno == errno.ESRCH:
+ # No such process
+ os.unlink(self.lockfile)
+ return False
+
+ self.module.fail_json(msg="Unable to check PID %s in %s: %s" % (oldpid, self.lockfile, to_native(e)))
+ except (IOError, OSError) as e:
+ # lockfile disappeared?
+ return False
+
+ # another copy seems to be running
+ return True
+
+ @property
+ def yum_base(self):
+ if self._yum_base:
+ return self._yum_base
+ else:
+ # Only init once
+ self._yum_base = yum.YumBase()
+ self._yum_base.preconf.debuglevel = 0
+ self._yum_base.preconf.errorlevel = 0
+ self._yum_base.preconf.plugins = True
+ self._yum_base.preconf.enabled_plugins = self.enable_plugin
+ self._yum_base.preconf.disabled_plugins = self.disable_plugin
+ if self.releasever:
+ self._yum_base.preconf.releasever = self.releasever
+ if self.installroot != '/':
+ # do not setup installroot by default, because of error
+ # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
+ # in old yum version (like in CentOS 6.6)
+ self._yum_base.preconf.root = self.installroot
+ self._yum_base.conf.installroot = self.installroot
+ if self.conf_file and os.path.exists(self.conf_file):
+ self._yum_base.preconf.fn = self.conf_file
+ if os.geteuid() != 0:
+ if hasattr(self._yum_base, 'setCacheDir'):
+ self._yum_base.setCacheDir()
+ else:
+ cachedir = yum.misc.getCacheDir()
+ self._yum_base.repos.setCacheDir(cachedir)
+ self._yum_base.conf.cache = 0
+ if self.disable_excludes:
+ self._yum_base.conf.disable_excludes = self.disable_excludes
+
+ # setting conf.sslverify allows retrieving the repo's metadata
+ # without validating the certificate, but that does not allow
+ # package installation from a bad-ssl repo.
+ self._yum_base.conf.sslverify = self.sslverify
+
+            # A side effect of accessing conf is that the configuration is
+            # loaded and plugins are discovered
+ self.yum_base.conf
+
+ try:
+ for rid in self.disablerepo:
+ self.yum_base.repos.disableRepo(rid)
+
+ self._enablerepos_with_error_checking()
+
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return self._yum_base
+
+ def po_to_envra(self, po):
+ if hasattr(po, 'ui_envra'):
+ return po.ui_envra
+
+ return '%s:%s-%s-%s.%s' % (po.epoch, po.name, po.version, po.release, po.arch)
+
+ def is_group_env_installed(self, name):
+ name_lower = name.lower()
+
+ if yum.__version_info__ >= (3, 4):
+ groups_list = self.yum_base.doGroupLists(return_evgrps=True)
+ else:
+ groups_list = self.yum_base.doGroupLists()
+
+ # list of the installed groups on the first index
+ groups = groups_list[0]
+ for group in groups:
+ if name_lower.endswith(group.name.lower()) or name_lower.endswith(group.groupid.lower()):
+ return True
+
+ if yum.__version_info__ >= (3, 4):
+ # list of the installed env_groups on the third index
+ envs = groups_list[2]
+ for env in envs:
+ if name_lower.endswith(env.name.lower()) or name_lower.endswith(env.environmentid.lower()):
+ return True
+
+ return False
+
+ def is_installed(self, repoq, pkgspec, qf=None, is_pkg=False):
+ if qf is None:
+ qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}\n"
+
+ if not repoq:
+ pkgs = []
+ try:
+ e, m, _ = self.yum_base.rpmdb.matchPackageNames([pkgspec])
+ pkgs = e + m
+ if not pkgs and not is_pkg:
+ pkgs.extend(self.yum_base.returnInstalledPackagesByDep(pkgspec))
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return [self.po_to_envra(p) for p in pkgs]
+
+ else:
+ global rpmbin
+ if not rpmbin:
+ rpmbin = self.module.get_bin_path('rpm', required=True)
+
+ cmd = [rpmbin, '-q', '--qf', qf, pkgspec]
+ if '*' in pkgspec:
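+                # glob patterns only match when querying all installed packages ('rpm -qa')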
+ cmd.append('-a')
+ if self.installroot != '/':
+ cmd.extend(['--root', self.installroot])
+ # rpm localizes messages and we're screen scraping so make sure we use
+ # an appropriate locale
+ locale = get_best_parsable_locale(self.module)
+ lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+ if rc != 0 and 'is not installed' not in out:
+ self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err))
+ if 'is not installed' in out:
+ out = ''
+
+ pkgs = [p for p in out.replace('(none)', '0').split('\n') if p.strip()]
+ if not pkgs and not is_pkg:
+ cmd = [rpmbin, '-q', '--qf', qf, '--whatprovides', pkgspec]
+ if self.installroot != '/':
+ cmd.extend(['--root', self.installroot])
+ rc2, out2, err2 = self.module.run_command(cmd, environ_update=lang_env)
+ else:
+ rc2, out2, err2 = (0, '', '')
+
+ if rc2 != 0 and 'no package provides' not in out2:
+ self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err + err2))
+ if 'no package provides' in out2:
+ out2 = ''
+ pkgs += [p for p in out2.replace('(none)', '0').split('\n') if p.strip()]
+ return pkgs
+
+ return []
+
+ def is_available(self, repoq, pkgspec, qf=def_qf):
+ if not repoq:
+
+ pkgs = []
+ try:
+ e, m, _ = self.yum_base.pkgSack.matchPackageNames([pkgspec])
+ pkgs = e + m
+ if not pkgs:
+ pkgs.extend(self.yum_base.returnPackagesByDep(pkgspec))
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return [self.po_to_envra(p) for p in pkgs]
+
+ else:
+ myrepoq = list(repoq)
+
+ r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
+ myrepoq.extend(r_cmd)
+
+ r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
+ myrepoq.extend(r_cmd)
+
+            if self.releasever:
+                myrepoq.append('--releasever=%s' % self.releasever)
+
+ cmd = myrepoq + ["--qf", qf, pkgspec]
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ return [p for p in out.split('\n') if p.strip()]
+ else:
+ self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
+
+ return []
+
+ def is_update(self, repoq, pkgspec, qf=def_qf):
+ if not repoq:
+
+ pkgs = []
+ updates = []
+
+ try:
+ pkgs = self.yum_base.returnPackagesByDep(pkgspec) + \
+ self.yum_base.returnInstalledPackagesByDep(pkgspec)
+ if not pkgs:
+ e, m, _ = self.yum_base.pkgSack.matchPackageNames([pkgspec])
+ pkgs = e + m
+ updates = self.yum_base.doPackageLists(pkgnarrow='updates').updates
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ retpkgs = (pkg for pkg in pkgs if pkg in updates)
+
+ return set(self.po_to_envra(p) for p in retpkgs)
+
+ else:
+ myrepoq = list(repoq)
+ r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
+ myrepoq.extend(r_cmd)
+
+ r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
+ myrepoq.extend(r_cmd)
+
+            if self.releasever:
+                myrepoq.append('--releasever=%s' % self.releasever)
+
+ cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ return set(p for p in out.split('\n') if p.strip())
+ else:
+ self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
+
+ return set()
+
+ def what_provides(self, repoq, req_spec, qf=def_qf):
+ if not repoq:
+
+ pkgs = []
+ try:
+ try:
+ pkgs = self.yum_base.returnPackagesByDep(req_spec) + \
+ self.yum_base.returnInstalledPackagesByDep(req_spec)
+ except Exception as e:
+ # If a repo with `repo_gpgcheck=1` is added and the repo GPG
+ # key was never accepted, querying this repo will throw an
+ # error: 'repomd.xml signature could not be verified'. In that
+ # situation we need to run `yum -y makecache fast` which will accept
+ # the key and try again.
+ if 'repomd.xml signature could not be verified' in to_native(e):
+ if self.releasever:
+ self.module.run_command(self.yum_basecmd + ['makecache', 'fast', '--releasever=%s' % self.releasever])
+ else:
+ self.module.run_command(self.yum_basecmd + ['makecache', 'fast'])
+ pkgs = self.yum_base.returnPackagesByDep(req_spec) + \
+ self.yum_base.returnInstalledPackagesByDep(req_spec)
+ else:
+ raise
+ if not pkgs:
+ exact_matches, glob_matches = self.yum_base.pkgSack.matchPackageNames([req_spec])[0:2]
+ pkgs.extend(exact_matches)
+ pkgs.extend(glob_matches)
+ exact_matches, glob_matches = self.yum_base.rpmdb.matchPackageNames([req_spec])[0:2]
+ pkgs.extend(exact_matches)
+ pkgs.extend(glob_matches)
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return set(self.po_to_envra(p) for p in pkgs)
+
+ else:
+ myrepoq = list(repoq)
+ r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
+ myrepoq.extend(r_cmd)
+
+ r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
+ myrepoq.extend(r_cmd)
+
+            if self.releasever:
+                myrepoq.append('--releasever=%s' % self.releasever)
+
+ cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
+ rc, out, err = self.module.run_command(cmd)
+ cmd = myrepoq + ["--qf", qf, req_spec]
+ rc2, out2, err2 = self.module.run_command(cmd)
+ if rc == 0 and rc2 == 0:
+ out += out2
+ pkgs = {p for p in out.split('\n') if p.strip()}
+ if not pkgs:
+ pkgs = self.is_installed(repoq, req_spec, qf=qf)
+ return pkgs
+ else:
+ self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
+
+ return set()
+
+ def transaction_exists(self, pkglist):
+ """
+ checks the package list to see if any packages are
+ involved in an incomplete transaction
+ """
+
+ conflicts = []
+ if not transaction_helpers:
+ return conflicts
+
+        # first, we create a list of the package 'nvreas'
+        # so we can compare the pieces later more easily
+        # (a real list, not a generator: it is re-iterated for every
+        # unfinished-transaction step below)
+        pkglist_nvreas = [splitFilename(pkg) for pkg in pkglist]
+
+ # next, we build the list of packages that are
+ # contained within an unfinished transaction
+ unfinished_transactions = find_unfinished_transactions()
+ for trans in unfinished_transactions:
+ steps = find_ts_remaining(trans)
+ for step in steps:
+ # the action is install/erase/etc., but we only
+ # care about the package spec contained in the step
+ (action, step_spec) = step
+ (n, v, r, e, a) = splitFilename(step_spec)
+ # and see if that spec is in the list of packages
+ # requested for installation/updating
+ for pkg in pkglist_nvreas:
+ # if the name and arch match, we're going to assume
+ # this package is part of a pending transaction
+ # the label is just for display purposes
+ label = "%s-%s" % (n, a)
+ if n == pkg[0] and a == pkg[4]:
+ if label not in conflicts:
+ conflicts.append("%s-%s" % (n, a))
+ break
+ return conflicts
+
+ def local_envra(self, path):
+ """return envra of a local rpm passed in"""
+
+ ts = rpm.TransactionSet()
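+        # skip signature verification so the header can be read even when the
+        # signing key has not been imported into the rpm database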
+ ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
+ fd = os.open(path, os.O_RDONLY)
+ try:
+ header = ts.hdrFromFdno(fd)
+        except rpm.error:
+            return None
+ finally:
+ os.close(fd)
+
+ return '%s:%s-%s-%s.%s' % (
+ header[rpm.RPMTAG_EPOCH] or '0',
+ header[rpm.RPMTAG_NAME],
+ header[rpm.RPMTAG_VERSION],
+ header[rpm.RPMTAG_RELEASE],
+ header[rpm.RPMTAG_ARCH]
+ )
+
+ @contextmanager
+ def set_env_proxy(self):
+ # setting system proxy environment and saving old, if exists
+ namepass = ""
+ scheme = ["http", "https"]
+ old_proxy_env = [os.getenv("http_proxy"), os.getenv("https_proxy")]
+ try:
+ # "_none_" is a special value to disable proxy in yum.conf/*.repo
+ if self.yum_base.conf.proxy and self.yum_base.conf.proxy not in ("_none_",):
+ if self.yum_base.conf.proxy_username:
+ namepass = namepass + self.yum_base.conf.proxy_username
+ proxy_url = self.yum_base.conf.proxy
+ if self.yum_base.conf.proxy_password:
+ namepass = namepass + ":" + self.yum_base.conf.proxy_password
+ elif '@' in self.yum_base.conf.proxy:
+ namepass = self.yum_base.conf.proxy.split('@')[0].split('//')[-1]
+ proxy_url = self.yum_base.conf.proxy.replace("{0}@".format(namepass), "")
+
+ if namepass:
+ namepass = namepass + '@'
+ for item in scheme:
+ os.environ[item + "_proxy"] = re.sub(
+ r"(http://)",
+ r"\g<1>" + namepass, proxy_url
+ )
+ else:
+ for item in scheme:
+ os.environ[item + "_proxy"] = self.yum_base.conf.proxy
+ yield
+ except yum.Errors.YumBaseError:
+ raise
+ finally:
+            # revert to the previous system configuration
+ for item in scheme:
+ if os.getenv("{0}_proxy".format(item)):
+ del os.environ["{0}_proxy".format(item)]
+ if old_proxy_env[0]:
+ os.environ["http_proxy"] = old_proxy_env[0]
+ if old_proxy_env[1]:
+ os.environ["https_proxy"] = old_proxy_env[1]
+
+ def pkg_to_dict(self, pkgstr):
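+        # expects repoquery/rpm output of the form
+        # 'name|epoch|version|release|arch|repoid' (exactly five '|' separators)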
+ if pkgstr.strip() and pkgstr.count('|') == 5:
+ n, e, v, r, a, repo = pkgstr.split('|')
+ else:
+ return {'error_parsing': pkgstr}
+
+ d = {
+ 'name': n,
+ 'arch': a,
+ 'epoch': e,
+ 'release': r,
+ 'version': v,
+ 'repo': repo,
+ 'envra': '%s:%s-%s-%s.%s' % (e, n, v, r, a)
+ }
+
+ if repo == 'installed':
+ d['yumstate'] = 'installed'
+ else:
+ d['yumstate'] = 'available'
+
+ return d
+
+ def repolist(self, repoq, qf="%{repoid}"):
+ cmd = repoq + ["--qf", qf, "-a"]
+ if self.releasever:
+ cmd.extend(['--releasever=%s' % self.releasever])
+ rc, out, _ = self.module.run_command(cmd)
+ if rc == 0:
+ return set(p for p in out.split('\n') if p.strip())
+ else:
+ return []
+
+ def list_stuff(self, repoquerybin, stuff):
+
+ qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
+ # is_installed goes through rpm instead of repoquery so it needs a slightly different format
+ is_installed_qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|installed\n"
+ repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
+ if self.disablerepo:
+ repoq.extend(['--disablerepo', ','.join(self.disablerepo)])
+ if self.enablerepo:
+ repoq.extend(['--enablerepo', ','.join(self.enablerepo)])
+ if self.installroot != '/':
+ repoq.extend(['--installroot', self.installroot])
+ if self.conf_file and os.path.exists(self.conf_file):
+ repoq += ['-c', self.conf_file]
+
+ if stuff == 'installed':
+ return [self.pkg_to_dict(p) for p in sorted(self.is_installed(repoq, '-a', qf=is_installed_qf)) if p.strip()]
+
+ if stuff == 'updates':
+ return [self.pkg_to_dict(p) for p in sorted(self.is_update(repoq, '-a', qf=qf)) if p.strip()]
+
+ if stuff == 'available':
+ return [self.pkg_to_dict(p) for p in sorted(self.is_available(repoq, '-a', qf=qf)) if p.strip()]
+
+ if stuff == 'repos':
+ return [dict(repoid=name, state='enabled') for name in sorted(self.repolist(repoq)) if name.strip()]
+
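+        # otherwise treat 'stuff' as a package spec and report both installed
+        # and available matches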
+ return [
+ self.pkg_to_dict(p) for p in
+ sorted(self.is_installed(repoq, stuff, qf=is_installed_qf) + self.is_available(repoq, stuff, qf=qf))
+ if p.strip()
+ ]
+
+ def exec_install(self, items, action, pkgs, res):
+ cmd = self.yum_basecmd + [action] + pkgs
+ if self.releasever:
+ cmd.extend(['--releasever=%s' % self.releasever])
+
+ # setting sslverify using --setopt is required as conf.sslverify only
+ # affects the metadata retrieval.
+ if not self.sslverify:
+ cmd.extend(['--setopt', 'sslverify=0'])
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs))
+ else:
+ res['changes'] = dict(installed=pkgs)
+
+ locale = get_best_parsable_locale(self.module)
+ lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+
+ if rc == 1:
+ for spec in items:
+ # Fail on invalid urls:
+ if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
+ err = 'Package at %s could not be installed' % spec
+ self.module.fail_json(changed=False, msg=err, rc=rc)
+
+ res['rc'] = rc
+ res['results'].append(out)
+ res['msg'] += err
+ res['changed'] = True
+
+ if ('Nothing to do' in out and rc == 0) or ('does not have any packages' in err):
+ res['changed'] = False
+
+ if rc != 0:
+ res['changed'] = False
+ self.module.fail_json(**res)
+
+ # Fail if yum prints 'No space left on device' because that means some
+ # packages failed executing their post install scripts because of lack of
+ # free space (e.g. kernel package couldn't generate initramfs). Note that
+ # yum can still exit with rc=0 even if some post scripts didn't execute
+ # correctly.
+ if 'No space left on device' in (out or err):
+ res['changed'] = False
+ res['msg'] = 'No space left on device'
+ self.module.fail_json(**res)
+
+ # FIXME - if we did an install - go and check the rpmdb to see if it actually installed
+ # look for each pkg in rpmdb
+ # look for each pkg via obsoletes
+
+ return res
+
+ def install(self, items, repoq):
+
+ pkgs = []
+ downgrade_pkgs = []
+ res = {}
+ res['results'] = []
+ res['msg'] = ''
+ res['rc'] = 0
+ res['changed'] = False
+
+ for spec in items:
+ pkg = None
+ downgrade_candidate = False
+
+ # check if pkgspec is installed (if possible for idempotence)
+ if spec.endswith('.rpm') or '://' in spec:
+ if '://' not in spec and not os.path.exists(spec):
+ res['msg'] += "No RPM file matching '%s' found on system" % spec
+ res['results'].append("No RPM file matching '%s' found on system" % spec)
+ res['rc'] = 127 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ if '://' in spec:
+ with self.set_env_proxy():
+ package = fetch_file(self.module, spec)
+ if not package.endswith('.rpm'):
+ # yum requires a local file to have the extension of .rpm and we
+                        # cannot guarantee that from a URL (redirects, proxies, etc)
+ new_package_path = '%s.rpm' % package
+ os.rename(package, new_package_path)
+ package = new_package_path
+ else:
+ package = spec
+
+ # most common case is the pkg is already installed
+ envra = self.local_envra(package)
+ if envra is None:
+ self.module.fail_json(msg="Failed to get envra information from RPM package: %s" % spec)
+ installed_pkgs = self.is_installed(repoq, envra)
+ if installed_pkgs:
+ res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], package))
+ continue
+
+ (name, ver, rel, epoch, arch) = splitFilename(envra)
+ installed_pkgs = self.is_installed(repoq, name)
+
+ # case for two same envr but different archs like x86_64 and i686
+ if len(installed_pkgs) == 2:
+ (cur_name0, cur_ver0, cur_rel0, cur_epoch0, cur_arch0) = splitFilename(installed_pkgs[0])
+ (cur_name1, cur_ver1, cur_rel1, cur_epoch1, cur_arch1) = splitFilename(installed_pkgs[1])
+ cur_epoch0 = cur_epoch0 or '0'
+ cur_epoch1 = cur_epoch1 or '0'
+ compare = compareEVR((cur_epoch0, cur_ver0, cur_rel0), (cur_epoch1, cur_ver1, cur_rel1))
+ if compare == 0 and cur_arch0 != cur_arch1:
+ for installed_pkg in installed_pkgs:
+ if installed_pkg.endswith(arch):
+ installed_pkgs = [installed_pkg]
+
+ if len(installed_pkgs) == 1:
+ installed_pkg = installed_pkgs[0]
+ (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(installed_pkg)
+ cur_epoch = cur_epoch or '0'
+ compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
+
+ # compare > 0 -> higher version is installed
+ # compare == 0 -> exact version is installed
+ # compare < 0 -> lower version is installed
+ if compare > 0 and self.allow_downgrade:
+ downgrade_candidate = True
+ elif compare >= 0:
+ continue
+
+ # else: if there are more installed packages with the same name, that would mean
+                # kernel, gpg-pubkey or the like, so just let yum deal with it and try to install it
+
+ pkg = package
+
+ # groups
+ elif spec.startswith('@'):
+ if self.is_group_env_installed(spec):
+ continue
+
+ pkg = spec
+
+ # range requires or file-requires or pkgname :(
+ else:
+ # most common case is the pkg is already installed and done
+ # short circuit all the bs - and search for it as a pkg in is_installed
+ # if you find it then we're done
+ if not set(['*', '?']).intersection(set(spec)):
+ installed_pkgs = self.is_installed(repoq, spec, is_pkg=True)
+ if installed_pkgs:
+ res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec))
+ continue
+
+ # look up what pkgs provide this
+ pkglist = self.what_provides(repoq, spec)
+ if not pkglist:
+ res['msg'] += "No package matching '%s' found available, installed or updated" % spec
+ res['results'].append("No package matching '%s' found available, installed or updated" % spec)
+ res['rc'] = 126 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ # if any of the packages are involved in a transaction, fail now
+ # so that we don't hang on the yum operation later
+ conflicts = self.transaction_exists(pkglist)
+ if conflicts:
+ res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
+ res['rc'] = 125 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ # if any of them are installed
+ # then nothing to do
+
+ found = False
+ for this in pkglist:
+ if self.is_installed(repoq, this, is_pkg=True):
+ found = True
+ res['results'].append('%s providing %s is already installed' % (this, spec))
+ break
+
+ # if the version of the pkg you have installed is not in ANY repo, but there are
+ # other versions in the repos (both higher and lower) then the previous checks won't work.
+ # so we check one more time. This really only works for pkgname - not for file provides or virt provides
+ # but virt provides should be all caught in what_provides on its own.
+ # highly irritating
+ if not found:
+ if self.is_installed(repoq, spec):
+ found = True
+ res['results'].append('package providing %s is already installed' % (spec))
+
+ if found:
+ continue
+
+ # Downgrade - The yum install command will only install or upgrade to a spec version, it will
+ # not install an older version of an RPM even if specified by the install spec. So we need to
+ # determine if this is a downgrade, and then use the yum downgrade command to install the RPM.
+ if self.allow_downgrade:
+ for package in pkglist:
+ # Get the NEVRA of the requested package using pkglist instead of spec because pkglist
+ # contains consistently-formatted package names returned by yum, rather than user input
+ # that is often not parsed correctly by splitFilename().
+ (name, ver, rel, epoch, arch) = splitFilename(package)
+
+ # Check if any version of the requested package is installed
+ inst_pkgs = self.is_installed(repoq, name, is_pkg=True)
+ if inst_pkgs:
+ (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(inst_pkgs[0])
+ compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
+ if compare > 0:
+ downgrade_candidate = True
+ else:
+ downgrade_candidate = False
+ break
+
+ # If package needs to be installed/upgraded/downgraded, then pass in the spec
+ # we could get here if nothing provides it but that's not
+ # the error we're catching here
+ pkg = spec
+
+ if downgrade_candidate and self.allow_downgrade:
+ downgrade_pkgs.append(pkg)
+ else:
+ pkgs.append(pkg)
+
+ if downgrade_pkgs:
+ res = self.exec_install(items, 'downgrade', downgrade_pkgs, res)
+
+ if pkgs:
+ res = self.exec_install(items, 'install', pkgs, res)
+
+ return res
+
+ def remove(self, items, repoq):
+
+ pkgs = []
+ res = {}
+ res['results'] = []
+ res['msg'] = ''
+ res['changed'] = False
+ res['rc'] = 0
+
+ for pkg in items:
+ if pkg.startswith('@'):
+ installed = self.is_group_env_installed(pkg)
+ else:
+ installed = self.is_installed(repoq, pkg)
+
+ if installed:
+ pkgs.append(pkg)
+ else:
+ res['results'].append('%s is not installed' % pkg)
+
+ if pkgs:
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, results=res['results'], changes=dict(removed=pkgs))
+ else:
+ res['changes'] = dict(removed=pkgs)
+
+ # run an actual yum transaction
+ if self.autoremove:
+ cmd = self.yum_basecmd + ["autoremove"] + pkgs
+ else:
+ cmd = self.yum_basecmd + ["remove"] + pkgs
+ rc, out, err = self.module.run_command(cmd)
+
+ res['rc'] = rc
+ res['results'].append(out)
+ res['msg'] = err
+
+ if rc != 0:
+ if self.autoremove and 'No such command' in out:
+ self.module.fail_json(msg='Version of YUM too old for autoremove: Requires yum 3.4.3 (RHEL/CentOS 7+)')
+ else:
+ self.module.fail_json(**res)
+
+ # compile the results into one batch. If anything is changed
+ # then mark changed
+ # at the end - if we've end up failed then fail out of the rest
+ # of the process
+
+ # at this point we check to see if the pkg is no longer present
+ self._yum_base = None # previous YumBase package index is now invalid
+ for pkg in pkgs:
+ if pkg.startswith('@'):
+ installed = self.is_group_env_installed(pkg)
+ else:
+ installed = self.is_installed(repoq, pkg, is_pkg=True)
+
+ if installed:
+ # Return a message so it's obvious to the user why yum failed
+ # and which package couldn't be removed. More details:
+ # https://github.com/ansible/ansible/issues/35672
+ res['msg'] = "Package '%s' couldn't be removed!" % pkg
+ self.module.fail_json(**res)
+
+ res['changed'] = True
+
+ return res
+
+ def run_check_update(self):
+ # run check-update to see if we have packages pending
+ if self.releasever:
+ rc, out, err = self.module.run_command(self.yum_basecmd + ['check-update'] + ['--releasever=%s' % self.releasever])
+ else:
+ rc, out, err = self.module.run_command(self.yum_basecmd + ['check-update'])
+ return rc, out, err
+
+ @staticmethod
+ def parse_check_update(check_update_output):
+ # preprocess string and filter out empty lines so the regex below works
+ out = '\n'.join((l for l in check_update_output.splitlines() if l))
+
+ # Remove incorrect new lines in longer columns in output from yum check-update
+ # yum line wrapping can move the repo to the next line:
+ # some_looooooooooooooooooooooooooooooooooooong_package_name 1:1.2.3-1.el7
+ # some-repo-label
+ out = re.sub(r'\n\W+(.*)', r' \1', out)
+
+ updates = {}
+ obsoletes = {}
+ for line in out.split('\n'):
+ line = line.split()
+            # Ignore irrelevant lines:
+            #  - '*' in line matches lines like mirror lists: "* base: mirror.corbina.net"
+            #  - len(line) not in [3, 6] filters out lines like
+            #    "This system is not registered with an entitlement server..."
+            #    (a 6-field line is a package obsolete entry)
+            #  - requiring '.' in line[0] (the package name) ensures it is of the format
+            #    "package_name.arch" (e.g. coreutils.x86_64)
+ if '*' in line or len(line) not in [3, 6] or '.' not in line[0]:
+ continue
+
+ pkg, version, repo = line[0], line[1], line[2]
+ name, dist = pkg.rsplit('.', 1)
+
+ if name not in updates:
+ updates[name] = []
+
+ updates[name].append({'version': version, 'dist': dist, 'repo': repo})
+
+ if len(line) == 6:
+ obsolete_pkg, obsolete_version, obsolete_repo = line[3], line[4], line[5]
+ obsolete_name, obsolete_dist = obsolete_pkg.rsplit('.', 1)
+
+ if obsolete_name not in obsoletes:
+ obsoletes[obsolete_name] = []
+
+ obsoletes[obsolete_name].append({'version': obsolete_version, 'dist': obsolete_dist, 'repo': obsolete_repo})
+
+ return updates, obsoletes
+
+ def latest(self, items, repoq):
+
+ res = {}
+ res['results'] = []
+ res['msg'] = ''
+ res['changed'] = False
+ res['rc'] = 0
+ pkgs = {}
+ pkgs['update'] = []
+ pkgs['install'] = []
+ updates = {}
+ obsoletes = {}
+ update_all = False
+ cmd = self.yum_basecmd[:]
+
+ # determine if we're doing an update all
+ if '*' in items:
+ update_all = True
+
+ rc, out, err = self.run_check_update()
+
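+        # 'yum check-update' exit codes: 0 = no updates pending,
+        # 100 = updates available, 1 = error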
+ if rc == 0 and update_all:
+ res['results'].append('Nothing to do here, all packages are up to date')
+ return res
+ elif rc == 100:
+ updates, obsoletes = self.parse_check_update(out)
+ elif rc == 1:
+ res['msg'] = err
+ res['rc'] = rc
+ self.module.fail_json(**res)
+
+ if update_all:
+ cmd.append('update')
+ will_update = set(updates.keys())
+ will_update_from_other_package = dict()
+ else:
+ will_update = set()
+ will_update_from_other_package = dict()
+ for spec in items:
+ # some guess work involved with groups. update @<group> will install the group if missing
+ if spec.startswith('@'):
+ pkgs['update'].append(spec)
+ will_update.add(spec)
+ continue
+
+ # check if pkgspec is installed (if possible for idempotence)
+ # localpkg
+ if spec.endswith('.rpm') and '://' not in spec:
+ if not os.path.exists(spec):
+ res['msg'] += "No RPM file matching '%s' found on system" % spec
+ res['results'].append("No RPM file matching '%s' found on system" % spec)
+ res['rc'] = 127 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ # get the pkg e:name-v-r.arch
+ envra = self.local_envra(spec)
+
+ if envra is None:
+ self.module.fail_json(msg="Failed to get envra information from RPM package: %s" % spec)
+
+ # local rpm files can't be updated
+ if self.is_installed(repoq, envra):
+ pkgs['update'].append(spec)
+ else:
+ pkgs['install'].append(spec)
+ continue
+
+ # URL
+ if '://' in spec:
+ # download package so that we can check if it's already installed
+ with self.set_env_proxy():
+ package = fetch_file(self.module, spec)
+ envra = self.local_envra(package)
+
+ if envra is None:
+ self.module.fail_json(msg="Failed to get envra information from RPM package: %s" % spec)
+
+ # local rpm files can't be updated
+ if self.is_installed(repoq, envra):
+ pkgs['update'].append(spec)
+ else:
+ pkgs['install'].append(spec)
+ continue
+
+ # dep/pkgname - find it
+ if self.is_installed(repoq, spec):
+ pkgs['update'].append(spec)
+ else:
+ pkgs['install'].append(spec)
+ pkglist = self.what_provides(repoq, spec)
+ # FIXME..? may not be desirable to throw an exception here if a single package is missing
+ if not pkglist:
+ res['msg'] += "No package matching '%s' found available, installed or updated" % spec
+ res['results'].append("No package matching '%s' found available, installed or updated" % spec)
+ res['rc'] = 126 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ nothing_to_do = True
+ for pkg in pkglist:
+ if spec in pkgs['install'] and self.is_available(repoq, pkg):
+ nothing_to_do = False
+ break
+
+ # this contains the full NVR and spec could contain wildcards
+ # or virtual provides (like "python-*" or "smtp-daemon") while
+ # updates contains name only.
+ pkgname, _, _, _, _ = splitFilename(pkg)
+ if spec in pkgs['update'] and pkgname in updates:
+ nothing_to_do = False
+ will_update.add(spec)
+ # Massage the updates list
+ if spec != pkgname:
+ # For reporting what packages would be updated more
+ # succinctly
+ will_update_from_other_package[spec] = pkgname
+ break
+
+ if not self.is_installed(repoq, spec) and self.update_only:
+ res['results'].append("Packages providing %s not installed due to update_only specified" % spec)
+ continue
+ if nothing_to_do:
+ res['results'].append("All packages providing %s are up to date" % spec)
+ continue
+
+ # if any of the packages are involved in a transaction, fail now
+ # so that we don't hang on the yum operation later
+ conflicts = self.transaction_exists(pkglist)
+ if conflicts:
+ res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
+ res['results'].append("The following packages have pending transactions: %s" % ", ".join(conflicts))
+ res['rc'] = 128 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ # check_mode output
+ to_update = []
+ for w in will_update:
+ if w.startswith('@'):
+ # yum groups
+ to_update.append((w, None))
+ elif w not in updates:
+ # There are (at least, probably more) 2 ways we can get here:
+ #
+ # * A virtual provides (our user specifies "webserver", but
+ # "httpd" is the key in 'updates').
+ #
+ # * A wildcard. emac* will get us here if there's a package
+ # called 'emacs' in the pending updates list. 'updates' will
+ # of course key on 'emacs' in that case.
+
+ other_pkg = will_update_from_other_package[w]
+
+ # We are guaranteed that: other_pkg in updates
+ # ...based on the logic above. But we only want to show one
+ # update in this case (given the wording of "at least") below.
+ # As an example, consider a package installed twice:
+ # foobar.x86_64, foobar.i686
+ # We want to avoid having both:
+ # ('foo*', 'because of (at least) foobar-1.x86_64 from repo')
+ # ('foo*', 'because of (at least) foobar-1.i686 from repo')
+ # We just pick the first one.
+ #
+ # TODO: This is something that might be nice to change, but it
+ # would be a module UI change. But without it, we're
+ # dropping potentially important information about what
+ # was updated. Instead of (given_spec, random_matching_package)
+ # it'd be nice if we appended (given_spec, [all_matching_packages])
+ #
+ # ... But then, we also drop information if multiple
+ # different (distinct) packages match the given spec and
+ # we should probably fix that too.
+ pkg = updates[other_pkg][0]
+ to_update.append(
+ (
+ w,
+ 'because of (at least) %s-%s.%s from %s' % (
+ other_pkg,
+ pkg['version'],
+ pkg['dist'],
+ pkg['repo']
+ )
+ )
+ )
+ else:
+ # Otherwise the spec is an exact match
+ for pkg in updates[w]:
+ to_update.append(
+ (
+ w,
+ '%s.%s from %s' % (
+ pkg['version'],
+ pkg['dist'],
+ pkg['repo']
+ )
+ )
+ )
+
+ if self.update_only:
+ res['changes'] = dict(installed=[], updated=to_update)
+ else:
+ res['changes'] = dict(installed=pkgs['install'], updated=to_update)
+
+ if obsoletes:
+ res['obsoletes'] = obsoletes
+
+ # return results before we actually execute stuff
+ if self.module.check_mode:
+ if will_update or pkgs['install']:
+ res['changed'] = True
+ return res
+
+ if self.releasever:
+ cmd.extend(['--releasever=%s' % self.releasever])
+
+ # run commands
+ if update_all:
+ rc, out, err = self.module.run_command(cmd)
+ res['changed'] = True
+ elif self.update_only:
+ if pkgs['update']:
+ cmd += ['update'] + pkgs['update']
+ locale = get_best_parsable_locale(self.module)
+ lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+ out_lower = out.strip().lower()
+ if not out_lower.endswith("no packages marked for update") and \
+ not out_lower.endswith("nothing to do"):
+ res['changed'] = True
+ else:
+ rc, out, err = [0, '', '']
+        elif pkgs['install'] or (will_update and not self.update_only):
+ cmd += ['install'] + pkgs['install'] + pkgs['update']
+ locale = get_best_parsable_locale(self.module)
+ lang_env = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale)
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+ out_lower = out.strip().lower()
+ if not out_lower.endswith("no packages marked for update") and \
+ not out_lower.endswith("nothing to do"):
+ res['changed'] = True
+ else:
+ rc, out, err = [0, '', '']
+
+ res['rc'] = rc
+ res['msg'] += err
+ res['results'].append(out)
+
+ if rc:
+ res['failed'] = True
+
+ return res
+
+ def ensure(self, repoq):
+ pkgs = self.names
+
+ # autoremove was provided without `name`
+ if not self.names and self.autoremove:
+ pkgs = []
+ self.state = 'absent'
+
+ if self.conf_file and os.path.exists(self.conf_file):
+ self.yum_basecmd += ['-c', self.conf_file]
+
+ if repoq:
+ repoq += ['-c', self.conf_file]
+
+ if self.skip_broken:
+ self.yum_basecmd.extend(['--skip-broken'])
+
+ if self.disablerepo:
+ self.yum_basecmd.extend(['--disablerepo=%s' % ','.join(self.disablerepo)])
+
+ if self.enablerepo:
+ self.yum_basecmd.extend(['--enablerepo=%s' % ','.join(self.enablerepo)])
+
+ if self.enable_plugin:
+ self.yum_basecmd.extend(['--enableplugin', ','.join(self.enable_plugin)])
+
+ if self.disable_plugin:
+ self.yum_basecmd.extend(['--disableplugin', ','.join(self.disable_plugin)])
+
+ if self.exclude:
+ e_cmd = ['--exclude=%s' % ','.join(self.exclude)]
+ self.yum_basecmd.extend(e_cmd)
+
+ if self.disable_excludes:
+ self.yum_basecmd.extend(['--disableexcludes=%s' % self.disable_excludes])
+
+ if self.cacheonly:
+ self.yum_basecmd.extend(['--cacheonly'])
+
+ if self.download_only:
+ self.yum_basecmd.extend(['--downloadonly'])
+
+ if self.download_dir:
+ self.yum_basecmd.extend(['--downloaddir=%s' % self.download_dir])
+
+ if self.releasever:
+ self.yum_basecmd.extend(['--releasever=%s' % self.releasever])
+
+ if self.installroot != '/':
+ # do not setup installroot by default, because of error
+ # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
+ # in old yum version (like in CentOS 6.6)
+ e_cmd = ['--installroot=%s' % self.installroot]
+ self.yum_basecmd.extend(e_cmd)
+
+ if self.state in ('installed', 'present', 'latest'):
+            """ The need for this entire if conditional has to be reevaluated;
+                this function is the ensure function that is called
+                in the main section.
+
+                This conditional tends to disable/enable repos for the
+                install/present/latest actions; the same could actually
+                be done for the remove and absent actions.
+
+                As a solution I would advise calling
+                try: self.yum_base.repos.disableRepo(disablerepo)
+                and
+                try: self.yum_base.repos.enableRepo(enablerepo)
+                right before any yum_cmd is actually called, regardless
+                of the yum action.
+
+                Please note that the enable/disablerepo options are general
+                options, which means that we can call those with any action
+                option. https://linux.die.net/man/8/yum
+
+                This docstring will be removed when issue #21619
+                is solved.
+
+                This has been triggered by: #19587
+            """
+
+ if self.update_cache:
+ self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])
+
+ try:
+ current_repos = self.yum_base.repos.repos.keys()
+ if self.enablerepo:
+ try:
+ new_repos = self.yum_base.repos.repos.keys()
+ for i in new_repos:
+ if i not in current_repos:
+ rid = self.yum_base.repos.getRepo(i)
+ a = rid.repoXML.repoid # nopep8 - https://github.com/ansible/ansible/pull/21475#pullrequestreview-22404868
+ current_repos = new_repos
+ except yum.Errors.YumBaseError as e:
+ self.module.fail_json(msg="Error setting/accessing repos: %s" % to_native(e))
+ except yum.Errors.YumBaseError as e:
+ self.module.fail_json(msg="Error accessing repos: %s" % to_native(e))
+ if self.state == 'latest' or self.update_only:
+ if self.disable_gpg_check:
+ self.yum_basecmd.append('--nogpgcheck')
+ if self.security:
+ self.yum_basecmd.append('--security')
+ if self.bugfix:
+ self.yum_basecmd.append('--bugfix')
+ res = self.latest(pkgs, repoq)
+ elif self.state in ('installed', 'present'):
+ if self.disable_gpg_check:
+ self.yum_basecmd.append('--nogpgcheck')
+ res = self.install(pkgs, repoq)
+ elif self.state in ('removed', 'absent'):
+ res = self.remove(pkgs, repoq)
+ else:
+ # should be caught by AnsibleModule argument_spec
+ self.module.fail_json(
+ msg="we should never get here unless this all failed",
+ changed=False,
+ results='',
+ errors='unexpected state'
+ )
+ return res
+
+ @staticmethod
+ def has_yum():
+ return HAS_YUM_PYTHON
+
+ def run(self):
+ """
+ actually execute the module code backend
+ """
+
+ if (not HAS_RPM_PYTHON or not HAS_YUM_PYTHON) and sys.executable != '/usr/bin/python' and not has_respawned():
+ respawn_module('/usr/bin/python')
+ # end of the line for this process; we'll exit here once the respawned module has completed
+
+ error_msgs = []
+ if not HAS_RPM_PYTHON:
+ error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
+ if not HAS_YUM_PYTHON:
+ error_msgs.append('The Python 2 yum module is needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
+
+ self.wait_for_lock()
+
+ if error_msgs:
+ self.module.fail_json(msg='. '.join(error_msgs))
+
+ # fedora will redirect yum to dnf, which has incompatibilities
+ # with how this module expects yum to operate. If yum-deprecated
+ # is available, use that instead to emulate the old behaviors.
+ if self.module.get_bin_path('yum-deprecated'):
+ yumbin = self.module.get_bin_path('yum-deprecated')
+ else:
+ yumbin = self.module.get_bin_path('yum')
+
+ # need debug level 2 to get 'Nothing to do' for groupinstall.
+ self.yum_basecmd = [yumbin, '-d', '2', '-y']
+
+ if self.update_cache and not self.names and not self.list:
+ rc, stdout, stderr = self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])
+ if rc == 0:
+ self.module.exit_json(
+ changed=False,
+ msg="Cache updated",
+ rc=rc,
+ results=[]
+ )
+ else:
+ self.module.exit_json(
+ changed=False,
+ msg="Failed to update cache",
+ rc=rc,
+ results=[stderr],
+ )
+
+ repoquerybin = self.module.get_bin_path('repoquery', required=False)
+
+ if self.install_repoquery and not repoquerybin and not self.module.check_mode:
+ yum_path = self.module.get_bin_path('yum')
+ if yum_path:
+ if self.releasever:
+ self.module.run_command('%s -y install yum-utils --releasever %s' % (yum_path, self.releasever))
+ else:
+ self.module.run_command('%s -y install yum-utils' % yum_path)
+ repoquerybin = self.module.get_bin_path('repoquery', required=False)
+
+ if self.list:
+ if not repoquerybin:
+ self.module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.")
+ results = {'results': self.list_stuff(repoquerybin, self.list)}
+ else:
+ # If rhn-plugin is installed and no rhn-certificate is available on
+ # the system then users will see an error message using the yum API.
+ # Use repoquery in those cases.
+
+ repoquery = None
+ try:
+ yum_plugins = self.yum_base.plugins._plugins
+ except AttributeError:
+ pass
+ else:
+ if 'rhnplugin' in yum_plugins:
+ if repoquerybin:
+ repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
+ if self.installroot != '/':
+ repoquery.extend(['--installroot', self.installroot])
+
+ if self.disable_excludes:
+ # repoquery does not support --disableexcludes,
+ # so make a temp copy of yum.conf and get rid of the 'exclude=' line there
+ try:
+ with open('/etc/yum.conf', 'r') as f:
+ content = f.readlines()
+
+ tmp_conf_file = tempfile.NamedTemporaryFile(dir=self.module.tmpdir, delete=False)
+ self.module.add_cleanup_file(tmp_conf_file.name)
+
+ tmp_conf_file.writelines([c for c in content if not c.startswith("exclude=")])
+ tmp_conf_file.close()
+ except Exception as e:
+ self.module.fail_json(msg="Failure setting up repoquery: %s" % to_native(e))
+
+ repoquery.extend(['-c', tmp_conf_file.name])
+
+ results = self.ensure(repoquery)
+ if repoquery:
+ results['msg'] = '%s %s' % (
+ results.get('msg', ''),
+ 'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.'
+ )
+
+ self.module.exit_json(**results)
+
+
+def main():
+ # state=installed name=pkgspec
+ # state=removed name=pkgspec
+ # state=latest name=pkgspec
+ #
+ # informational commands:
+ # list=installed
+ # list=updates
+ # list=available
+ # list=repos
+ # list=pkgspec
+
+ yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'yum', 'yum4', 'dnf'])
+
+ module = AnsibleModule(
+ **yumdnf_argument_spec
+ )
+
+ module_implementation = YumModule(module)
+ module_implementation.run()
+
+
+if __name__ == '__main__':
+ main()
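+
+
+# A hypothetical usage sketch (comments only, not part of the module's
+# EXAMPLES): upgrading everything while skipping packages with broken
+# dependencies.
+#
+#   - name: Upgrade all packages, skipping broken ones
+#     ansible.builtin.yum:
+#       name: '*'
+#       state: latest
+#       skip_broken: yes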
diff --git a/lib/ansible/modules/yum_repository.py b/lib/ansible/modules/yum_repository.py
new file mode 100644
index 0000000..84a10b9
--- /dev/null
+++ b/lib/ansible/modules/yum_repository.py
@@ -0,0 +1,735 @@
+# encoding: utf-8
+
+# (c) 2015-2016, Jiri Tyr <jiri.tyr@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: yum_repository
+author: Jiri Tyr (@jtyr)
+version_added: '2.1'
+short_description: Add or remove YUM repositories
+description:
+ - Add or remove YUM repositories in RPM-based Linux distributions.
+ - If you wish to update an existing repository definition use M(community.general.ini_file) instead.
+
+options:
+ async:
+ description:
+ - If set to C(true) Yum will download packages and metadata from this
+ repo in parallel, if possible.
+ - In ansible-core 2.11, 2.12, and 2.13 the default value is C(true).
+ - This option has been deprecated in RHEL 8. If you're using one of the
+ versions listed above, you can set this option to None to avoid passing an
+ unknown configuration option.
+ type: bool
+ bandwidth:
+ description:
+ - Maximum available network bandwidth in bytes/second. Used with the
+ I(throttle) option.
+ - If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
+ throttling will be disabled. If I(throttle) is expressed as a data rate
+ (bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
+ throttling).
+ type: str
+ default: '0'
+ baseurl:
+ description:
+ - URL to the directory where the yum repository's 'repodata' directory
+ lives.
+ - It can also be a list of multiple URLs.
+ - This, the I(metalink) or I(mirrorlist) parameters are required if I(state) is set to
+ C(present).
+ type: list
+ elements: str
+ cost:
+ description:
+ - Relative cost of accessing this repository. Useful for weighing one
+ repo's packages as greater/less than any other.
+ type: str
+ default: '1000'
+ deltarpm_metadata_percentage:
+ description:
+ - When the relative size of deltarpm metadata vs pkgs is larger than
+ this, deltarpm metadata is not downloaded from the repo. Note that you
+ can give values over C(100), so C(200) means that the metadata is
+ required to be half the size of the packages. Use C(0) to turn off
+ this check, and always download metadata.
+ type: str
+ default: '100'
+ deltarpm_percentage:
+ description:
+ - When the relative size of delta vs pkg is larger than this, delta is
+ not used. Use C(0) to turn off delta rpm processing. Local repositories
+ (with file:// I(baseurl)) have delta rpms turned off by default.
+ type: str
+ default: '75'
+ description:
+ description:
+ - A human readable string describing the repository. This option corresponds to the "name" property in the repo file.
+ - This parameter is only required if I(state) is set to C(present).
+ type: str
+ enabled:
+ description:
+      - This tells yum whether or not to use this repository.
+ - Yum default value is C(true).
+ type: bool
+ enablegroups:
+ description:
+ - Determines whether yum will allow the use of package groups for this
+ repository.
+ - Yum default value is C(true).
+ type: bool
+ exclude:
+ description:
+ - List of packages to exclude from updates or installs. This should be a
+ space separated list. Shell globs using wildcards (eg. C(*) and C(?))
+ are allowed.
+ - The list can also be a regular YAML array.
+ type: list
+ elements: str
+ failovermethod:
+ choices: [roundrobin, priority]
+ default: roundrobin
+ description:
+ - C(roundrobin) randomly selects a URL out of the list of URLs to start
+ with and proceeds through each of them as it encounters a failure
+ contacting the host.
+ - C(priority) starts from the first I(baseurl) listed and reads through
+ them sequentially.
+ type: str
+ file:
+ description:
+ - File name without the C(.repo) extension to save the repo in. Defaults
+ to the value of I(name).
+ type: str
+ gpgcakey:
+ description:
+ - A URL pointing to the ASCII-armored CA key file for the repository.
+ type: str
+ gpgcheck:
+ description:
+ - Tells yum whether or not it should perform a GPG signature check on
+ packages.
+ - No default setting. If the value is not set, the system setting from
+ C(/etc/yum.conf) or system default of C(false) will be used.
+ type: bool
+ gpgkey:
+ description:
+ - A URL pointing to the ASCII-armored GPG key file for the repository.
+ - It can also be a list of multiple URLs.
+ type: list
+ elements: str
+ module_hotfixes:
+ description:
+ - Disable module RPM filtering and make all RPMs from the repository
+ available. The default is C(None).
+ version_added: '2.11'
+ type: bool
+ http_caching:
+ description:
+ - Determines how upstream HTTP caches are instructed to handle any HTTP
+ downloads that Yum does.
+ - C(all) means that all HTTP downloads should be cached.
+ - C(packages) means that only RPM package downloads should be cached (but
+ not repository metadata downloads).
+ - C(none) means that no HTTP downloads should be cached.
+ choices: [all, packages, none]
+ type: str
+ default: all
+ include:
+ description:
+      - Include an external configuration file. Both a local path and a URL
+        are supported. The configuration file will be inserted at the position
+        of the I(include=) line. Included files may contain further include
+        lines. Yum will abort with an error if an inclusion loop is detected.
+ type: str
+ includepkgs:
+ description:
+ - List of packages you want to only use from a repository. This should be
+ a space separated list. Shell globs using wildcards (eg. C(*) and C(?))
+ are allowed. Substitution variables (e.g. C($releasever)) are honored
+ here.
+ - The list can also be a regular YAML array.
+ type: list
+ elements: str
+ ip_resolve:
+ description:
+ - Determines how yum resolves host names.
+ - C(4) or C(IPv4) - resolve to IPv4 addresses only.
+ - C(6) or C(IPv6) - resolve to IPv6 addresses only.
+ choices: ['4', '6', IPv4, IPv6, whatever]
+ type: str
+ default: whatever
+ keepalive:
+ description:
+ - This tells yum whether or not HTTP/1.1 keepalive should be used with
+ this repository. This can improve transfer speeds by using one
+ connection when downloading multiple files from a repository.
+ type: bool
+ default: 'no'
+ keepcache:
+ description:
+ - Either C(1) or C(0). Determines whether or not yum keeps the cache of
+ headers and packages after successful installation.
+ choices: ['0', '1']
+ type: str
+ default: '1'
+ metadata_expire:
+ description:
+ - Time (in seconds) after which the metadata will expire.
+ - Default value is 6 hours.
+ type: str
+ default: '21600'
+ metadata_expire_filter:
+ description:
+ - Filter the I(metadata_expire) time, allowing a trade of speed for
+ accuracy if a command doesn't require it. Each yum command can specify
+ that it requires a certain level of timeliness quality from the remote
+ repos. from "I'm about to install/upgrade, so this better be current"
+ to "Anything that's available is good enough".
+ - C(never) - Nothing is filtered, always obey I(metadata_expire).
+ - C(read-only:past) - Commands that only care about past information are
+ filtered from metadata expiring. Eg. I(yum history) info (if history
+ needs to lookup anything about a previous transaction, then by
+ definition the remote package was available in the past).
+ - C(read-only:present) - Commands that are balanced between past and
+ future. Eg. I(yum list yum).
+ - C(read-only:future) - Commands that are likely to result in running
+ other commands which will require the latest metadata. Eg.
+ I(yum check-update).
+ - Note that this option does not override "yum clean expire-cache".
+ choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
+ type: str
+ default: 'read-only:present'
+ metalink:
+ description:
+      - Specifies a URL to a metalink file for the repomd.xml; a list of
+        mirrors for the entire repository is generated by converting the
+        mirrors for the repomd.xml file to a I(baseurl).
+ - This, the I(baseurl) or I(mirrorlist) parameters are required if I(state) is set to
+ C(present).
+ type: str
+ mirrorlist:
+ description:
+ - Specifies a URL to a file containing a list of baseurls.
+ - This, the I(baseurl) or I(metalink) parameters are required if I(state) is set to
+ C(present).
+ type: str
+ mirrorlist_expire:
+ description:
+      - Time (in seconds) after which the locally cached mirrorlist will
+        expire.
+ - Default value is 6 hours.
+ type: str
+ default: '21600'
+ name:
+ description:
+ - Unique repository ID. This option builds the section name of the repository in the repo file.
+ - This parameter is only required if I(state) is set to C(present) or
+ C(absent).
+ type: str
+ required: true
+ password:
+ description:
+ - Password to use with the username for basic authentication.
+ type: str
+ priority:
+ description:
+ - Enforce ordered protection of repositories. The value is an integer
+ from 1 to 99.
+ - This option only works if the YUM Priorities plugin is installed.
+ type: str
+ default: '99'
+ protect:
+ description:
+ - Protect packages from updates from other repositories.
+ type: bool
+ default: 'no'
+ proxy:
+ description:
+ - URL to the proxy server that yum should use. Set to C(_none_) to
+ disable the global proxy setting.
+ type: str
+ proxy_password:
+ description:
+ - Password for this proxy.
+ type: str
+ proxy_username:
+ description:
+ - Username to use for proxy.
+ type: str
+ repo_gpgcheck:
+ description:
+ - This tells yum whether or not it should perform a GPG signature check
+ on the repodata from this repository.
+ type: bool
+ default: 'no'
+ reposdir:
+ description:
+ - Directory where the C(.repo) files will be stored.
+ type: path
+ default: /etc/yum.repos.d
+ retries:
+ description:
+ - Set the number of times any attempt to retrieve a file should retry
+ before returning an error. Setting this to C(0) makes yum try forever.
+ type: str
+ default: '10'
+ s3_enabled:
+ description:
+ - Enables support for S3 repositories.
+ - This option only works if the YUM S3 plugin is installed.
+ type: bool
+ default: 'no'
+ skip_if_unavailable:
+ description:
+ - If set to C(true) yum will continue running if this repository cannot be
+ contacted for any reason. This should be set carefully as all repos are
+ consulted for any given command.
+ type: bool
+ default: 'no'
+ ssl_check_cert_permissions:
+ description:
+ - Whether yum should check the permissions on the paths for the
+ certificates on the repository (both remote and local).
+ - If we can't read any of the files then yum will force
+ I(skip_if_unavailable) to be C(true). This is most useful for non-root
+ processes which use yum on repos that have client cert files which are
+ readable only by root.
+ type: bool
+ default: 'no'
+ sslcacert:
+ description:
+ - Path to the directory containing the databases of the certificate
+ authorities yum should use to verify SSL certificates.
+ type: str
+ aliases: [ ca_cert ]
+ sslclientcert:
+ description:
+ - Path to the SSL client certificate yum should use to connect to
+ repos/remote sites.
+ type: str
+ aliases: [ client_cert ]
+ sslclientkey:
+ description:
+ - Path to the SSL client key yum should use to connect to repos/remote
+ sites.
+ type: str
+ aliases: [ client_key ]
+ sslverify:
+ description:
+ - Defines whether yum should verify SSL certificates/hosts at all.
+ type: bool
+ default: 'yes'
+ aliases: [ validate_certs ]
+ state:
+ description:
+ - State of the repo file.
+ choices: [absent, present]
+ type: str
+ default: present
+ throttle:
+ description:
+ - Enable bandwidth throttling for downloads.
+      - This option can be expressed as an absolute data rate in bytes/sec. An
+ SI prefix (k, M or G) may be appended to the bandwidth value.
+ type: str
+ timeout:
+ description:
+ - Number of seconds to wait for a connection before timing out.
+ type: str
+ default: '30'
+ ui_repoid_vars:
+ description:
+ - When a repository id is displayed, append these yum variables to the
+ string if they are used in the I(baseurl)/etc. Variables are appended
+ in the order listed (and found).
+ type: str
+ default: releasever basearch
+ username:
+ description:
+      - Username to use for basic authentication to a repo or, really, any URL.
+ type: str
+
+extends_documentation_fragment:
+ - action_common_attributes
+ - files
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: rhel
+notes:
+ - All comments will be removed if modifying an existing repo file.
+ - Section order is preserved in an existing repo file.
+ - Parameters in a section are ordered alphabetically in an existing repo
+ file.
+ - The repo file will be automatically deleted if it contains no repository.
+ - When removing a repository, beware that the metadata cache may still remain
+ on disk until you run C(yum clean all). Use a notification handler for this.
+ - "The C(params) parameter was removed in Ansible 2.5 due to circumventing Ansible's parameter
+ handling"
+'''
+
+EXAMPLES = '''
+- name: Add repository
+ ansible.builtin.yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+
+- name: Add multiple repositories into the same file (1/2)
+ ansible.builtin.yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ file: external_repos
+ baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+ gpgcheck: no
+
+- name: Add multiple repositories into the same file (2/2)
+ ansible.builtin.yum_repository:
+ name: rpmforge
+ description: RPMforge YUM repo
+ file: external_repos
+ baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
+ mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
+ enabled: no
+
+# Handler showing how to clean yum metadata cache
+- name: yum-clean-metadata
+ ansible.builtin.command: yum clean metadata
+
+# Example removing a repository and cleaning up metadata cache
+- name: Remove repository (and clean up left-over metadata)
+ ansible.builtin.yum_repository:
+ name: epel
+ state: absent
+ notify: yum-clean-metadata
+
+- name: Remove repository from a specific repo file
+ ansible.builtin.yum_repository:
+ name: epel
+ file: external_repos
+ state: absent
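+
+# A hypothetical extra example (not in the original set): adding a repository
+# via a metalink, with GPG checking enabled; the URLs are placeholders.
+- name: Add repository via metalink
+  ansible.builtin.yum_repository:
+    name: example
+    description: Example YUM repo
+    metalink: https://metalink.example.com/?repo=example-$releasever&arch=$basearch
+    gpgcheck: yes
+    gpgkey: https://download.example.com/RPM-GPG-KEY-example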
+'''
+
+RETURN = '''
+repo:
+ description: repository name
+ returned: success
+ type: str
+ sample: "epel"
+state:
+ description: state of the target, after execution
+ returned: success
+ type: str
+ sample: "present"
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils._text import to_native
+
+
+class YumRepo(object):
+ # Class global variables
+ module = None
+ params = None
+ section = None
+ repofile = configparser.RawConfigParser()
+
+ # List of parameters which will be allowed in the repo file output
+ allowed_params = [
+ 'async',
+ 'bandwidth',
+ 'baseurl',
+ 'cost',
+ 'deltarpm_metadata_percentage',
+ 'deltarpm_percentage',
+ 'enabled',
+ 'enablegroups',
+ 'exclude',
+ 'failovermethod',
+ 'gpgcakey',
+ 'gpgcheck',
+ 'gpgkey',
+ 'module_hotfixes',
+ 'http_caching',
+ 'include',
+ 'includepkgs',
+ 'ip_resolve',
+ 'keepalive',
+ 'keepcache',
+ 'metadata_expire',
+ 'metadata_expire_filter',
+ 'metalink',
+ 'mirrorlist',
+ 'mirrorlist_expire',
+ 'name',
+ 'password',
+ 'priority',
+ 'protect',
+ 'proxy',
+ 'proxy_password',
+ 'proxy_username',
+ 'repo_gpgcheck',
+ 'retries',
+ 's3_enabled',
+ 'skip_if_unavailable',
+ 'sslcacert',
+ 'ssl_check_cert_permissions',
+ 'sslclientcert',
+ 'sslclientkey',
+ 'sslverify',
+ 'throttle',
+ 'timeout',
+ 'ui_repoid_vars',
+ 'username']
+
+ # List of parameters which can be a list
+ list_params = ['exclude', 'includepkgs']
+
+ def __init__(self, module):
+ # To be able to use fail_json
+ self.module = module
+ # Shortcut for the params
+ self.params = self.module.params
+ # Section is always the repoid
+ self.section = self.params['repoid']
+
+ # Check if repo directory exists
+ repos_dir = self.params['reposdir']
+ if not os.path.isdir(repos_dir):
+ self.module.fail_json(
+ msg="Repo directory '%s' does not exist." % repos_dir)
+
+ # Set dest; also used to set dest parameter for the FS attributes
+ self.params['dest'] = os.path.join(
+ repos_dir, "%s.repo" % self.params['file'])
+
+ # Read the repo file if it exists
+ if os.path.isfile(self.params['dest']):
+ self.repofile.read(self.params['dest'])
+
+ def add(self):
+ # Remove already existing repo and create a new one
+ if self.repofile.has_section(self.section):
+ self.repofile.remove_section(self.section)
+
+ # Add section
+ self.repofile.add_section(self.section)
+
+        # A baseurl/metalink/mirrorlist is not required here because for
+        # removal we need only the repo name. This is why we check whether
+        # one of them is defined before adding a new repo.
+ req_params = (self.params['baseurl'], self.params['metalink'], self.params['mirrorlist'])
+ if req_params == (None, None, None):
+ self.module.fail_json(
+ msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required for "
+ "adding a new repo.")
+
+ # Set options
+ for key, value in sorted(self.params.items()):
+ if key in self.list_params and isinstance(value, list):
+ # Join items into one string for specific parameters
+ value = ' '.join(value)
+ elif isinstance(value, bool):
+ # Convert boolean value to integer
+ value = int(value)
+
+ # Set the value only if it was defined (default is None)
+ if value is not None and key in self.allowed_params:
+ self.repofile.set(self.section, key, value)
+
+ def save(self):
+ if len(self.repofile.sections()):
+ # Write data into the file
+ try:
+ with open(self.params['dest'], 'w') as fd:
+ self.repofile.write(fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg="Problems handling file %s." % self.params['dest'],
+ details=to_native(e))
+ else:
+            # Remove the file if there are no repos
+ try:
+ os.remove(self.params['dest'])
+ except OSError as e:
+ self.module.fail_json(
+ msg=(
+ "Cannot remove empty repo file %s." %
+ self.params['dest']),
+ details=to_native(e))
+
+ def remove(self):
+ # Remove section if exists
+ if self.repofile.has_section(self.section):
+ self.repofile.remove_section(self.section)
+
+ def dump(self):
+ repo_string = ""
+
+ # Compose the repo file
+ for section in sorted(self.repofile.sections()):
+ repo_string += "[%s]\n" % section
+
+ for key, value in sorted(self.repofile.items(section)):
+ repo_string += "%s = %s\n" % (key, value)
+
+ repo_string += "\n"
+
+ return repo_string
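+
+    # For illustration (an assumption about typical output, not taken from the
+    # module): with a single section 'epel' whose 'name' and 'baseurl' options
+    # are set, dump() returns:
+    #
+    #   [epel]
+    #   baseurl = https://download.example.com/pub/epel/$releasever/$basearch/
+    #   name = EPEL YUM repo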
+
+
+def main():
+ # Module settings
+ argument_spec = dict(
+ bandwidth=dict(),
+ baseurl=dict(type='list', elements='str'),
+ cost=dict(),
+ deltarpm_metadata_percentage=dict(),
+ deltarpm_percentage=dict(),
+ description=dict(),
+ enabled=dict(type='bool'),
+ enablegroups=dict(type='bool'),
+ exclude=dict(type='list', elements='str'),
+ failovermethod=dict(choices=['roundrobin', 'priority']),
+ file=dict(),
+ gpgcakey=dict(no_log=False),
+ gpgcheck=dict(type='bool'),
+ gpgkey=dict(type='list', elements='str', no_log=False),
+ module_hotfixes=dict(type='bool'),
+ http_caching=dict(choices=['all', 'packages', 'none']),
+ include=dict(),
+ includepkgs=dict(type='list', elements='str'),
+ ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
+ keepalive=dict(type='bool'),
+ keepcache=dict(choices=['0', '1']),
+ metadata_expire=dict(),
+ metadata_expire_filter=dict(
+ choices=[
+ 'never',
+ 'read-only:past',
+ 'read-only:present',
+ 'read-only:future']),
+ metalink=dict(),
+ mirrorlist=dict(),
+ mirrorlist_expire=dict(),
+ name=dict(required=True),
+ params=dict(type='dict'),
+ password=dict(no_log=True),
+ priority=dict(),
+ protect=dict(type='bool'),
+ proxy=dict(),
+ proxy_password=dict(no_log=True),
+ proxy_username=dict(),
+ repo_gpgcheck=dict(type='bool'),
+ reposdir=dict(default='/etc/yum.repos.d', type='path'),
+ retries=dict(),
+ s3_enabled=dict(type='bool'),
+ skip_if_unavailable=dict(type='bool'),
+ sslcacert=dict(aliases=['ca_cert']),
+ ssl_check_cert_permissions=dict(type='bool'),
+ sslclientcert=dict(aliases=['client_cert']),
+ sslclientkey=dict(aliases=['client_key'], no_log=False),
+ sslverify=dict(type='bool', aliases=['validate_certs']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ throttle=dict(),
+ timeout=dict(),
+ ui_repoid_vars=dict(),
+ username=dict(),
+ )
+
+ argument_spec['async'] = dict(type='bool')
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Params was removed
+ # https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html
+ if module.params['params']:
+ module.fail_json(msg="The params option to yum_repository was removed in Ansible 2.5 since it circumvents Ansible's option handling")
+
+ name = module.params['name']
+ state = module.params['state']
+
+ # Check if required parameters are present
+ if state == 'present':
+ if (
+ module.params['baseurl'] is None and
+ module.params['metalink'] is None and
+ module.params['mirrorlist'] is None):
+ module.fail_json(
+ msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required.")
+ if module.params['description'] is None:
+ module.fail_json(
+ msg="Parameter 'description' is required.")
+
+ # Rename "name" and "description" to ensure correct key sorting
+ module.params['repoid'] = module.params['name']
+ module.params['name'] = module.params['description']
+ del module.params['description']
+
+ # Change list type to string for baseurl and gpgkey
+ for list_param in ['baseurl', 'gpgkey']:
+ if (
+ list_param in module.params and
+ module.params[list_param] is not None):
+ module.params[list_param] = "\n".join(module.params[list_param])
+
+ # Define repo file name if it doesn't exist
+ if module.params['file'] is None:
+ module.params['file'] = module.params['repoid']
+
+ # Instantiate the YumRepo object
+ yumrepo = YumRepo(module)
+
+ # Get repo status before change
+ diff = {
+ 'before_header': yumrepo.params['dest'],
+ 'before': yumrepo.dump(),
+ 'after_header': yumrepo.params['dest'],
+ 'after': ''
+ }
+
+ # Perform action depending on the state
+ if state == 'present':
+ yumrepo.add()
+ elif state == 'absent':
+ yumrepo.remove()
+
+ # Get repo status after change
+ diff['after'] = yumrepo.dump()
+
+ # Compare repo states
+ changed = diff['before'] != diff['after']
+
+ # Save the file only if not in check mode and if there was a change
+ if not module.check_mode and changed:
+ yumrepo.save()
+
+ # Change file attributes if needed
+ if os.path.isfile(module.params['dest']):
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ # Print status of the change
+ module.exit_json(changed=changed, repo=name, state=state, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/parsing/__init__.py b/lib/ansible/parsing/__init__.py
new file mode 100644
index 0000000..28634b1
--- /dev/null
+++ b/lib/ansible/parsing/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/parsing/ajson.py b/lib/ansible/parsing/ajson.py
new file mode 100644
index 0000000..8049755
--- /dev/null
+++ b/lib/ansible/parsing/ajson.py
@@ -0,0 +1,42 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+# Imported for backwards compat
+from ansible.module_utils.common.json import AnsibleJSONEncoder
+
+from ansible.parsing.vault import VaultLib
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible.utils.unsafe_proxy import wrap_var
+
+
+class AnsibleJSONDecoder(json.JSONDecoder):
+
+ _vaults = {} # type: dict[str, VaultLib]
+
+ def __init__(self, *args, **kwargs):
+ kwargs['object_hook'] = self.object_hook
+ super(AnsibleJSONDecoder, self).__init__(*args, **kwargs)
+
+ @classmethod
+ def set_secrets(cls, secrets):
+ cls._vaults['default'] = VaultLib(secrets=secrets)
+
+ def object_hook(self, pairs):
+ for key in pairs:
+ value = pairs[key]
+
+ if key == '__ansible_vault':
+ value = AnsibleVaultEncryptedUnicode(value)
+ if self._vaults:
+ value.vault = self._vaults['default']
+ return value
+ elif key == '__ansible_unsafe':
+ return wrap_var(value)
+
+ return pairs
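+
+
+# A minimal usage sketch (illustrative, not part of this file): decoding a
+# payload that carries the '__ansible_unsafe' marker. The input string is
+# hypothetical.
+#
+#   import json
+#   from ansible.parsing.ajson import AnsibleJSONDecoder
+#
+#   data = json.loads('{"x": {"__ansible_unsafe": "{{ raw }}"}}',
+#                     cls=AnsibleJSONDecoder)
+#   # data['x'] is now wrapped via wrap_var() and will be treated as unsafe
+#   # (never templated) by later processing.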
diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py
new file mode 100644
index 0000000..cbba966
--- /dev/null
+++ b/lib/ansible/parsing/dataloader.py
@@ -0,0 +1,468 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import os
+import os.path
+import re
+import tempfile
+
+from ansible import constants as C
+from ansible.errors import AnsibleFileNotFound, AnsibleParserError
+from ansible.module_utils.basic import is_executable
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.parsing.quoting import unquote
+from ansible.parsing.utils.yaml import from_yaml
+from ansible.parsing.vault import VaultLib, b_HEADER, is_encrypted, is_encrypted_file, parse_vaulttext_envelope
+from ansible.utils.path import unfrackpath
+from ansible.utils.display import Display
+
+display = Display()
+
+
+# Tries to determine if a path is inside a role; the last dir must be 'tasks'.
+# This is not perfect, but people should really avoid 'tasks' dirs outside roles when using Ansible.
+RE_TASKS = re.compile(u'(?:^|%s)+tasks%s?$' % (os.path.sep, os.path.sep))
+
+
+class DataLoader:
+
+ '''
+ The DataLoader class is used to load and parse YAML or JSON content,
+ either from a given file name or from a string that was previously
+ read in through other means. A Vault password can be specified, and
+ any vault-encrypted files will be decrypted.
+
+ Data read from files will also be cached, so the file will never be
+ read from disk more than once.
+
+ Usage:
+
+ dl = DataLoader()
+ # optionally: dl.set_vault_password('foo')
+ ds = dl.load('...')
+ ds = dl.load_from_file('/path/to/file')
+ '''
+
+ def __init__(self):
+
+ self._basedir = '.'
+
+ # NOTE: not effective with forks as the main copy does not get updated.
+ # avoids rereading files
+ self._FILE_CACHE = dict()
+
+ # NOTE: not thread safe, also issues with forks not returning data to main proc
+ # so they need to be cleaned independently. See WorkerProcess for example.
+ # used to keep track of temp files for cleaning
+ self._tempfiles = set()
+
+ # initialize the vault stuff with an empty password
+ # TODO: replace with a ref to something that can get the password
+ # a creds/auth provider
+ # self.set_vault_password(None)
+ self._vaults = {}
+ self._vault = VaultLib()
+ self.set_vault_secrets(None)
+
+ # TODO: since we can query vault_secrets late, we could provide this to DataLoader init
+ def set_vault_secrets(self, vault_secrets):
+ self._vault.secrets = vault_secrets
+
+ def load(self, data, file_name='<string>', show_content=True, json_only=False):
+ '''Backwards compat for now'''
+ return from_yaml(data, file_name, show_content, self._vault.secrets, json_only=json_only)
+
+ def load_from_file(self, file_name, cache=True, unsafe=False, json_only=False):
+ ''' Loads data from a file, which can contain either JSON or YAML. '''
+
+ file_name = self.path_dwim(file_name)
+ display.debug("Loading data from %s" % file_name)
+
+ # if the file has already been read in and cached, we'll
+ # return those results to avoid more file/vault operations
+ if cache and file_name in self._FILE_CACHE:
+ parsed_data = self._FILE_CACHE[file_name]
+ else:
+ # read the file contents and load the data structure from them
+ (b_file_data, show_content) = self._get_file_contents(file_name)
+
+ file_data = to_text(b_file_data, errors='surrogate_or_strict')
+ parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content, json_only=json_only)
+
+ # cache the file contents for next time
+ self._FILE_CACHE[file_name] = parsed_data
+
+ if unsafe:
+ return parsed_data
+ else:
+ # return a deep copy here, so the cache is not affected
+ return copy.deepcopy(parsed_data)
+
+ def path_exists(self, path):
+ path = self.path_dwim(path)
+ return os.path.exists(to_bytes(path, errors='surrogate_or_strict'))
+
+ def is_file(self, path):
+ path = self.path_dwim(path)
+ return os.path.isfile(to_bytes(path, errors='surrogate_or_strict')) or path == os.devnull
+
+ def is_directory(self, path):
+ path = self.path_dwim(path)
+ return os.path.isdir(to_bytes(path, errors='surrogate_or_strict'))
+
+ def list_directory(self, path):
+ path = self.path_dwim(path)
+ return os.listdir(path)
+
+ def is_executable(self, path):
+ '''is the given path executable?'''
+ path = self.path_dwim(path)
+ return is_executable(path)
+
+ def _decrypt_if_vault_data(self, b_vault_data, b_file_name=None):
+ '''Decrypt b_vault_data if encrypted and return b_data and the show_content flag'''
+
+ if not is_encrypted(b_vault_data):
+ show_content = True
+ return b_vault_data, show_content
+
+ b_ciphertext, b_version, cipher_name, vault_id = parse_vaulttext_envelope(b_vault_data)
+ b_data = self._vault.decrypt(b_vault_data, filename=b_file_name)
+
+ show_content = False
+ return b_data, show_content
+
+ def _get_file_contents(self, file_name):
+ '''
+ Reads the file contents from the given file name
+
+ If the contents are vault-encrypted, it will decrypt them and return
+ the decrypted data
+
+ :arg file_name: The name of the file to read. If this is a relative
+ path, it will be expanded relative to the basedir
+ :raises AnsibleFileNotFound: if the file_name does not refer to a file
+ :raises AnsibleParserError: if we were unable to read the file
+        :return: A tuple of the file contents as a byte string and the show_content flag
+ '''
+ if not file_name or not isinstance(file_name, (binary_type, text_type)):
+ raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_name))
+
+ b_file_name = to_bytes(self.path_dwim(file_name))
+ # This is what we really want but have to fix unittests to make it pass
+ # if not os.path.exists(b_file_name) or not os.path.isfile(b_file_name):
+ if not self.path_exists(b_file_name):
+ raise AnsibleFileNotFound("Unable to retrieve file contents", file_name=file_name)
+
+ try:
+ with open(b_file_name, 'rb') as f:
+ data = f.read()
+ return self._decrypt_if_vault_data(data, b_file_name)
+ except (IOError, OSError) as e:
+ raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, to_native(e)), orig_exc=e)
+
+ def get_basedir(self):
+ ''' returns the current basedir '''
+ return self._basedir
+
+ def set_basedir(self, basedir):
+ ''' sets the base directory, used to find files when a relative path is given '''
+
+ if basedir is not None:
+ self._basedir = to_text(basedir)
+
+ def path_dwim(self, given):
+ '''
+ make relative paths work like folks expect.
+ '''
+
+ given = unquote(given)
+ given = to_text(given, errors='surrogate_or_strict')
+
+ if given.startswith(to_text(os.path.sep)) or given.startswith(u'~'):
+ path = given
+ else:
+ basedir = to_text(self._basedir, errors='surrogate_or_strict')
+ path = os.path.join(basedir, given)
+
+ return unfrackpath(path, follow=False)
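+
+    # For example (illustrative; paths are hypothetical): with _basedir set to
+    # '/srv/play', path_dwim('vars/x.yml') resolves to '/srv/play/vars/x.yml',
+    # while absolute and '~'-prefixed paths are only normalized by
+    # unfrackpath() rather than joined to the basedir.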
+
+ def _is_role(self, path):
+ ''' imperfect role detection, roles are still valid w/o tasks|meta/main.yml|yaml|etc '''
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ b_path_dirname = os.path.dirname(b_path)
+ b_upath = to_bytes(unfrackpath(path, follow=False), errors='surrogate_or_strict')
+
+ untasked_paths = (
+ os.path.join(b_path, b'main.yml'),
+ os.path.join(b_path, b'main.yaml'),
+ os.path.join(b_path, b'main'),
+ )
+ tasked_paths = (
+ os.path.join(b_upath, b'tasks/main.yml'),
+ os.path.join(b_upath, b'tasks/main.yaml'),
+ os.path.join(b_upath, b'tasks/main'),
+ os.path.join(b_upath, b'meta/main.yml'),
+ os.path.join(b_upath, b'meta/main.yaml'),
+ os.path.join(b_upath, b'meta/main'),
+ os.path.join(b_path_dirname, b'tasks/main.yml'),
+ os.path.join(b_path_dirname, b'tasks/main.yaml'),
+ os.path.join(b_path_dirname, b'tasks/main'),
+ os.path.join(b_path_dirname, b'meta/main.yml'),
+ os.path.join(b_path_dirname, b'meta/main.yaml'),
+ os.path.join(b_path_dirname, b'meta/main'),
+ )
+
+ exists_untasked = map(os.path.exists, untasked_paths)
+ exists_tasked = map(os.path.exists, tasked_paths)
+ if RE_TASKS.search(path) and any(exists_untasked) or any(exists_tasked):
+ return True
+
+ return False
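+
+    # For example (illustrative): '/srv/roles/foo/tasks' is treated as a role
+    # path when '/srv/roles/foo/tasks/main.yml' or '/srv/roles/foo/meta/main.yml'
+    # exists, while a bare 'tasks' directory with no such files nearby is not.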
+
+ def path_dwim_relative(self, path, dirname, source, is_role=False):
+ '''
+ find one file in either a role or playbook dir with or without
+ explicitly named dirname subdirs
+
+ Used in action plugins and lookups to find supplemental files that
+ could be in either place.
+ '''
+
+ search = []
+ source = to_text(source, errors='surrogate_or_strict')
+
+ # I have full path, nothing else needs to be looked at
+ if source.startswith(to_text(os.path.sep)) or source.startswith(u'~'):
+ search.append(unfrackpath(source, follow=False))
+ else:
+ # base role/play path + templates/files/vars + relative filename
+ search.append(os.path.join(path, dirname, source))
+ basedir = unfrackpath(path, follow=False)
+
+            # not told whether this is a role; detect it, and if so make sure we get the correct base path
+ if not is_role:
+ is_role = self._is_role(path)
+
+ if is_role and RE_TASKS.search(path):
+ basedir = unfrackpath(os.path.dirname(path), follow=False)
+
+ cur_basedir = self._basedir
+ self.set_basedir(basedir)
+ # resolved base role/play path + templates/files/vars + relative filename
+ search.append(unfrackpath(os.path.join(basedir, dirname, source), follow=False))
+ self.set_basedir(cur_basedir)
+
+ if is_role and not source.endswith(dirname):
+ # look in role's tasks dir w/o dirname
+ search.append(unfrackpath(os.path.join(basedir, 'tasks', source), follow=False))
+
+ # try to create absolute path for loader basedir + templates/files/vars + filename
+ search.append(unfrackpath(os.path.join(dirname, source), follow=False))
+
+ # try to create absolute path for loader basedir
+ search.append(unfrackpath(os.path.join(basedir, source), follow=False))
+
+ # try to create absolute path for dirname + filename
+ search.append(self.path_dwim(os.path.join(dirname, source)))
+
+ # try to create absolute path for filename
+ search.append(self.path_dwim(source))
+
+ for candidate in search:
+ if os.path.exists(to_bytes(candidate, errors='surrogate_or_strict')):
+ break
+
+ return candidate
+
+ def path_dwim_relative_stack(self, paths, dirname, source, is_role=False):
+ '''
+        find one file in the first matching path in the stack, taking roles into account and adding the play basedir as a fallback
+
+ :arg paths: A list of text strings which are the paths to look for the filename in.
+ :arg dirname: A text string representing a directory. The directory
+ is prepended to the source to form the path to search for.
+ :arg source: A text string which is the filename to search for
+ :rtype: A text string
+ :returns: An absolute path to the filename ``source`` if found
+        :raises: An AnsibleFileNotFound Exception if the file is not found in any of the search paths
+ '''
+ b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
+ b_source = to_bytes(source, errors='surrogate_or_strict')
+
+ result = None
+ search = []
+ if source is None:
+ display.warning('Invalid request to find a file that matches a "null" value')
+ elif source and (source.startswith('~') or source.startswith(os.path.sep)):
+ # path is absolute, no relative needed, check existence and return source
+ test_path = unfrackpath(b_source, follow=False)
+ if os.path.exists(to_bytes(test_path, errors='surrogate_or_strict')):
+ result = test_path
+ else:
+ display.debug(u'evaluation_path:\n\t%s' % '\n\t'.join(paths))
+ for path in paths:
+ upath = unfrackpath(path, follow=False)
+ b_upath = to_bytes(upath, errors='surrogate_or_strict')
+ b_pb_base_dir = os.path.dirname(b_upath)
+
+ # if path is in role and 'tasks' not there already, add it into the search
+ if (is_role or self._is_role(path)) and b_pb_base_dir.endswith(b'/tasks'):
+ search.append(os.path.join(os.path.dirname(b_pb_base_dir), b_dirname, b_source))
+ search.append(os.path.join(b_pb_base_dir, b_source))
+ else:
+                    # don't add dirname if the user is already using it in source
+ if b_source.split(b'/')[0] != dirname:
+ search.append(os.path.join(b_upath, b_dirname, b_source))
+ search.append(os.path.join(b_upath, b_source))
+
+ # always append basedir as last resort
+            # don't add dirname if the user is already using it in source
+ if b_source.split(b'/')[0] != dirname:
+ search.append(os.path.join(to_bytes(self.get_basedir(), errors='surrogate_or_strict'), b_dirname, b_source))
+ search.append(os.path.join(to_bytes(self.get_basedir(), errors='surrogate_or_strict'), b_source))
+
+ display.debug(u'search_path:\n\t%s' % to_text(b'\n\t'.join(search)))
+ for b_candidate in search:
+ display.vvvvv(u'looking for "%s" at "%s"' % (source, to_text(b_candidate)))
+ if os.path.exists(b_candidate):
+ result = to_text(b_candidate)
+ break
+
+ if result is None:
+ raise AnsibleFileNotFound(file_name=source, paths=[to_native(p) for p in search])
+
+ return result
+
+ def _create_content_tempfile(self, content):
+ ''' Create a tempfile containing defined content '''
+ fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
+ f = os.fdopen(fd, 'wb')
+ content = to_bytes(content)
+ try:
+ f.write(content)
+ except Exception as err:
+ os.remove(content_tempfile)
+ raise Exception(err)
+ finally:
+ f.close()
+ return content_tempfile
+
+ def get_real_file(self, file_path, decrypt=True):
+ """
+ If the file is vault encrypted return a path to a temporary decrypted file
+ If the file is not encrypted then the path is returned
+ Temporary files are cleanup in the destructor
+ """
+
+ if not file_path or not isinstance(file_path, (binary_type, text_type)):
+ raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_path))
+
+ b_file_path = to_bytes(file_path, errors='surrogate_or_strict')
+ if not self.path_exists(b_file_path) or not self.is_file(b_file_path):
+ raise AnsibleFileNotFound(file_name=file_path)
+
+ real_path = self.path_dwim(file_path)
+
+ try:
+ if decrypt:
+ with open(to_bytes(real_path), 'rb') as f:
+ # Limit how much of the file is read since we do not know
+ # whether this is a vault file and therefore it could be very
+ # large.
+ if is_encrypted_file(f, count=len(b_HEADER)):
+ # if the file is encrypted and no password was specified,
+ # the decrypt call would throw an error, but we check first
+ # since the decrypt function doesn't know the file name
+ data = f.read()
+ if not self._vault.secrets:
+ raise AnsibleParserError("A vault password or secret must be specified to decrypt %s" % to_native(file_path))
+
+ data = self._vault.decrypt(data, filename=real_path)
+ # Make a temp file
+ real_path = self._create_content_tempfile(data)
+ self._tempfiles.add(real_path)
+
+ return real_path
+
+ except (IOError, OSError) as e:
+ raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (to_native(real_path), to_native(e)), orig_exc=e)
+
+ def cleanup_tmp_file(self, file_path):
+ """
+ Removes any temporary files created from a previous call to
+ get_real_file. file_path must be the path returned from a
+ previous call to get_real_file.
+ """
+ if file_path in self._tempfiles:
+ os.unlink(file_path)
+ self._tempfiles.remove(file_path)
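+
+    # Typical pairing (illustrative; the path is hypothetical):
+    #
+    #   real_path = loader.get_real_file('group_vars/all/vault.yml')
+    #   try:
+    #       with open(real_path, 'rb') as f:
+    #           data = f.read()
+    #   finally:
+    #       loader.cleanup_tmp_file(real_path)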
+
+ def cleanup_all_tmp_files(self):
+ """
+ Removes all temporary files that DataLoader has created
+ NOTE: not thread safe, forks also need special handling see __init__ for details.
+ """
+ for f in list(self._tempfiles):
+ try:
+ self.cleanup_tmp_file(f)
+ except Exception as e:
+ display.warning("Unable to cleanup temp files: %s" % to_text(e))
+
+ def find_vars_files(self, path, name, extensions=None, allow_dir=True):
+ """
+ Find vars files in a given path with specified name. This will find
+ files in a dir named <name>/ or a file called <name> ending in known
+ extensions.
+ """
+
+ b_path = to_bytes(os.path.join(path, name))
+ found = []
+
+ if extensions is None:
+ # Look for file with no extension first to find dir before file
+ extensions = [''] + C.YAML_FILENAME_EXTENSIONS
+ # add valid extensions to name
+ for ext in extensions:
+
+ if '.' in ext:
+ full_path = b_path + to_bytes(ext)
+ elif ext:
+ full_path = b'.'.join([b_path, to_bytes(ext)])
+ else:
+ full_path = b_path
+
+ if self.path_exists(full_path):
+ if self.is_directory(full_path):
+ if allow_dir:
+ found.extend(self._get_dir_vars_files(to_text(full_path), extensions))
+ else:
+ continue
+ else:
+ found.append(full_path)
+ break
+ return found
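+
+    # For example (illustrative): find_vars_files('/srv/play/group_vars', 'all')
+    # first checks for a directory or extensionless file named 'all', then for
+    # 'all.<ext>' for each extension in C.YAML_FILENAME_EXTENSIONS.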
+
+ def _get_dir_vars_files(self, path, extensions):
+ found = []
+ for spath in sorted(self.list_directory(path)):
+ if not spath.startswith(u'.') and not spath.endswith(u'~'): # skip hidden and backups
+
+ ext = os.path.splitext(spath)[-1]
+ full_spath = os.path.join(path, spath)
+
+ if self.is_directory(full_spath) and not ext: # recursive search if dir
+ found.extend(self._get_dir_vars_files(full_spath, extensions))
+ elif self.is_file(full_spath) and (not ext or to_text(ext) in extensions):
+ # only consider files with valid extensions or no extension
+ found.append(full_spath)
+
+ return found
diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py
new file mode 100644
index 0000000..aeb58b0
--- /dev/null
+++ b/lib/ansible/parsing/mod_args.py
@@ -0,0 +1,345 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.constants as C
+from ansible.errors import AnsibleParserError, AnsibleError, AnsibleAssertionError
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text
+from ansible.parsing.splitter import parse_kv, split_args
+from ansible.plugins.loader import module_loader, action_loader
+from ansible.template import Templar
+from ansible.utils.fqcn import add_internal_fqcns
+from ansible.utils.sentinel import Sentinel
+
+
+# For filtering out modules correctly below
+FREEFORM_ACTIONS = frozenset(C.MODULE_REQUIRE_ARGS)
+
+RAW_PARAM_MODULES = FREEFORM_ACTIONS.union(add_internal_fqcns((
+ 'include',
+ 'include_vars',
+ 'include_tasks',
+ 'include_role',
+ 'import_tasks',
+ 'import_role',
+ 'add_host',
+ 'group_by',
+ 'set_fact',
+ 'meta',
+)))
+
+BUILTIN_TASKS = frozenset(add_internal_fqcns((
+ 'meta',
+ 'include',
+ 'include_tasks',
+ 'include_role',
+ 'import_tasks',
+ 'import_role'
+)))
+
+
+class ModuleArgsParser:
+
+ """
+ There are several ways a module and argument set can be expressed:
+
+ # legacy form (for a shell command)
+ - action: shell echo hi
+
+ # common shorthand for local actions vs delegate_to
+ - local_action: shell echo hi
+
+ # most commonly:
+ - copy: src=a dest=b
+
+ # legacy form
+ - action: copy src=a dest=b
+
+ # complex args form, for passing structured data
+ - copy:
+ src: a
+ dest: b
+
+ # gross, but technically legal
+ - action:
+ module: copy
+ args:
+ src: a
+ dest: b
+
+ # Standard YAML form for command-type modules. In this case, the args specified
+ # will act as 'defaults' and will be overridden by any args specified
+ # in one of the other formats (complex args under the action, or
+ # parsed from the k=v string
+ - command: 'pwd'
+ args:
+ chdir: '/tmp'
+
+
+ This class has some of the logic to canonicalize these into the form
+
+ - module: <module_name>
+ delegate_to: <optional>
+ args: <args>
+
+ Args may also be munged for certain shell command parameters.
+ """
+
+ def __init__(self, task_ds=None, collection_list=None):
+ task_ds = {} if task_ds is None else task_ds
+
+ if not isinstance(task_ds, dict):
+ raise AnsibleAssertionError("the type of 'task_ds' should be a dict, but is a %s" % type(task_ds))
+ self._task_ds = task_ds
+ self._collection_list = collection_list
+ # delayed local imports to prevent circular import
+ from ansible.playbook.task import Task
+ from ansible.playbook.handler import Handler
+ # store the valid Task/Handler attrs for quick access
+ self._task_attrs = set(Task.fattributes)
+ self._task_attrs.update(set(Handler.fattributes))
+ # HACK: why are these not FieldAttributes on task with a post-validate to check usage?
+ self._task_attrs.update(['local_action', 'static'])
+ self._task_attrs = frozenset(self._task_attrs)
+
+ self.resolved_action = None
+
+ def _split_module_string(self, module_string):
+ '''
+ when module names are expressed like:
+ action: copy src=a dest=b
+ the first part of the string is the name of the module
+ and the rest are strings pertaining to the arguments.
+ '''
+
+ tokens = split_args(module_string)
+ if len(tokens) > 1:
+ return (tokens[0].strip(), " ".join(tokens[1:]))
+ else:
+ return (tokens[0].strip(), "")
+
+ def _normalize_parameters(self, thing, action=None, additional_args=None):
+ '''
+ arguments can be fuzzy. Deal with all the forms.
+ '''
+
+ additional_args = {} if additional_args is None else additional_args
+
+ # final args are the ones we'll eventually return, so first update
+ # them with any additional args specified, which have lower priority
+ # than those which may be parsed/normalized next
+ final_args = dict()
+ if additional_args:
+ if isinstance(additional_args, string_types):
+ templar = Templar(loader=None)
+ if templar.is_template(additional_args):
+ final_args['_variable_params'] = additional_args
+ else:
+ raise AnsibleParserError("Complex args containing variables cannot use bare variables (without Jinja2 delimiters), "
+ "and must use the full variable style ('{{var_name}}')")
+ elif isinstance(additional_args, dict):
+ final_args.update(additional_args)
+ else:
+ raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").')
+
+ # how we normalize depends if we figured out what the module name is
+ # yet. If we have already figured it out, it's a 'new style' invocation.
+ # otherwise, it's not
+
+ if action is not None:
+ args = self._normalize_new_style_args(thing, action)
+ else:
+ (action, args) = self._normalize_old_style_args(thing)
+
+ # this can occasionally happen, simplify
+ if args and 'args' in args:
+ tmp_args = args.pop('args')
+ if isinstance(tmp_args, string_types):
+ tmp_args = parse_kv(tmp_args)
+ args.update(tmp_args)
+
+ # only internal variables can start with an underscore, so
+ # we don't allow users to set them directly in arguments
+ if args and action not in FREEFORM_ACTIONS:
+ for arg in args:
+ arg = to_text(arg)
+ if arg.startswith('_ansible_'):
+ raise AnsibleError("invalid parameter specified for action '%s': '%s'" % (action, arg))
+
+ # finally, update the args we're going to return with the ones
+ # which were normalized above
+ if args:
+ final_args.update(args)
+
+ return (action, final_args)
+
+ def _normalize_new_style_args(self, thing, action):
+ '''
+ deals with fuzziness in new style module invocations
+ accepting key=value pairs and dictionaries, and returns
+ a dictionary of arguments
+
+ possible example inputs:
+ 'echo hi', 'shell'
+ {'region': 'xyz'}, 'ec2'
+ standardized outputs like:
+ { _raw_params: 'echo hi', _uses_shell: True }
+ '''
+
+ if isinstance(thing, dict):
+ # form is like: { xyz: { x: 2, y: 3 } }
+ args = thing
+ elif isinstance(thing, string_types):
+ # form is like: copy: src=a dest=b
+ check_raw = action in FREEFORM_ACTIONS
+ args = parse_kv(thing, check_raw=check_raw)
+ elif thing is None:
+ # this can happen with modules which take no params, like ping:
+ args = None
+ else:
+ raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
+ return args
+
+ def _normalize_old_style_args(self, thing):
+ '''
+ deals with fuzziness in old-style (action/local_action) module invocations
+ returns tuple of (module_name, dictionary_args)
+
+ possible example inputs:
+ { 'shell' : 'echo hi' }
+ 'shell echo hi'
+ {'module': 'ec2', 'x': 1 }
+ standardized outputs like:
+ ('ec2', { 'x': 1} )
+ '''
+
+ action = None
+ args = None
+
+ if isinstance(thing, dict):
+ # form is like: action: { module: 'copy', src: 'a', dest: 'b' }
+ thing = thing.copy()
+ if 'module' in thing:
+ action, module_args = self._split_module_string(thing['module'])
+ args = thing.copy()
+ check_raw = action in FREEFORM_ACTIONS
+ args.update(parse_kv(module_args, check_raw=check_raw))
+ del args['module']
+
+ elif isinstance(thing, string_types):
+ # form is like: action: copy src=a dest=b
+ (action, args) = self._split_module_string(thing)
+ check_raw = action in FREEFORM_ACTIONS
+ args = parse_kv(args, check_raw=check_raw)
+
+ else:
+ # need a dict or a string, so giving up
+ raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
+
+ return (action, args)
+
+ def parse(self, skip_action_validation=False):
+ '''
+        Given a task in one of the supported forms, parses and returns
+        the action, arguments, and delegate_to values for the
+        task, dealing with all sorts of levels of fuzziness.
+ '''
+
+ thing = None
+
+ action = None
+ delegate_to = self._task_ds.get('delegate_to', Sentinel)
+ args = dict()
+
+ # This is the standard YAML form for command-type modules. We grab
+ # the args and pass them in as additional arguments, which can/will
+ # be overwritten via dict updates from the other arg sources below
+ additional_args = self._task_ds.get('args', dict())
+
+ # We can have one of action, local_action, or module specified
+ # action
+ if 'action' in self._task_ds:
+ # an old school 'action' statement
+ thing = self._task_ds['action']
+ action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
+
+ # local_action
+ if 'local_action' in self._task_ds:
+ # local_action is similar but also implies a delegate_to
+ if action is not None:
+ raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
+ thing = self._task_ds.get('local_action', '')
+ delegate_to = 'localhost'
+ action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
+
+ # module: <stuff> is the more new-style invocation
+
+ # filter out task attributes so we're only querying unrecognized keys as actions/modules
+ non_task_ds = dict((k, v) for k, v in self._task_ds.items() if (k not in self._task_attrs) and (not k.startswith('with_')))
+
+ # walk the filtered input dictionary to see if we recognize a module name
+ for item, value in non_task_ds.items():
+ context = None
+ is_action_candidate = False
+ if item in BUILTIN_TASKS:
+ is_action_candidate = True
+ elif skip_action_validation:
+ is_action_candidate = True
+ else:
+ context = action_loader.find_plugin_with_context(item, collection_list=self._collection_list)
+ if not context.resolved:
+ context = module_loader.find_plugin_with_context(item, collection_list=self._collection_list)
+
+ is_action_candidate = context.resolved and bool(context.redirect_list)
+
+ if is_action_candidate:
+ # finding more than one module name is a problem
+ if action is not None:
+ raise AnsibleParserError("conflicting action statements: %s, %s" % (action, item), obj=self._task_ds)
+
+ if context is not None and context.resolved:
+ self.resolved_action = context.resolved_fqcn
+
+ action = item
+ thing = value
+ action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
+
+ # if we didn't see any module in the task at all, it's not a task really
+ if action is None:
+            if non_task_ds:  # there were unrecognized keys, but none resolved to a module/action
+ bad_action = list(non_task_ds.keys())[0]
+ raise AnsibleParserError("couldn't resolve module/action '{0}'. This often indicates a "
+ "misspelling, missing collection, or incorrect module path.".format(bad_action),
+ obj=self._task_ds)
+ else:
+ raise AnsibleParserError("no module/action detected in task.",
+ obj=self._task_ds)
+ elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES:
+ templar = Templar(loader=None)
+ raw_params = args.pop('_raw_params')
+ if templar.is_template(raw_params):
+ args['_variable_params'] = raw_params
+ else:
+ raise AnsibleParserError("this task '%s' has extra params, which is only allowed in the following modules: %s" % (action,
+ ", ".join(RAW_PARAM_MODULES)),
+ obj=self._task_ds)
+
+ return (action, args, delegate_to)
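+    # Illustrative sketch of a full parse (doctest-style; assumes the enclosing
+    # ModuleArgsParser class defined in this file and the standard module/action
+    # loaders, so that 'copy' resolves):
+    #
+    #   parser = ModuleArgsParser(dict(name='test', copy='src=a dest=b'))
+    #   parser.parse()   # -> ('copy', {'src': 'a', 'dest': 'b'}, Sentinel)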
diff --git a/lib/ansible/parsing/plugin_docs.py b/lib/ansible/parsing/plugin_docs.py
new file mode 100644
index 0000000..cda5463
--- /dev/null
+++ b/lib/ansible/parsing/plugin_docs.py
@@ -0,0 +1,227 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import tokenize
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils._text import to_text, to_native
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.utils.display import Display
+
+display = Display()
+
+
+string_to_vars = {
+ 'DOCUMENTATION': 'doc',
+ 'EXAMPLES': 'plainexamples',
+ 'RETURN': 'returndocs',
+ 'ANSIBLE_METADATA': 'metadata', # NOTE: now unused, but kept for backwards compat
+}
+
+
+def _var2string(value):
+ ''' reverse lookup of the dict above '''
+ for k, v in string_to_vars.items():
+ if v == value:
+ return k
+
+
+def _init_doc_dict():
+ ''' initialize a return dict for docs with the expected structure '''
+ return {k: None for k in string_to_vars.values()}
+
+
+def read_docstring_from_yaml_file(filename, verbose=True, ignore_errors=True):
+    ''' Read docs for a plugin from a 'sidecar' yaml file '''
+
+ data = _init_doc_dict()
+ file_data = {}
+
+ try:
+ with open(filename, 'rb') as yamlfile:
+ file_data = AnsibleLoader(yamlfile.read(), file_name=filename).get_single_data()
+ except Exception as e:
+ msg = "Unable to parse yaml file '%s': %s" % (filename, to_native(e))
+ if not ignore_errors:
+ raise AnsibleParserError(msg, orig_exc=e)
+ elif verbose:
+ display.error(msg)
+
+ if file_data:
+ for key in string_to_vars:
+ data[string_to_vars[key]] = file_data.get(key, None)
+
+ return data
+
+
+def read_docstring_from_python_module(filename, verbose=True, ignore_errors=True):
+ """
+ Use tokenization to search for assignment of the documentation variables in the given file.
+    Parse them from YAML and return the resulting python structure or None, together with examples as plain text.
+ """
+
+ seen = set()
+ data = _init_doc_dict()
+
+ next_string = None
+ with tokenize.open(filename) as f:
+ tokens = tokenize.generate_tokens(f.readline)
+ for token in tokens:
+
+            # found a label that looks like a variable
+ if token.type == tokenize.NAME:
+
+                # the label is an expected name, starts at column 0 (a top-level
+                # assignment), and has not been seen before
+                if token.start[1] == 0 and token.string in string_to_vars and token.string not in seen:
+ # next token that is string has the docs
+ next_string = string_to_vars[token.string]
+ continue
+
+ # previous token indicated this string is a doc string
+ if next_string is not None and token.type == tokenize.STRING:
+
+ # ensure we only process one case of it
+ seen.add(token.string)
+
+ value = token.string
+
+ # strip string modifiers/delimiters
+ if value.startswith(('r', 'b')):
+ value = value.lstrip('rb')
+
+ if value.startswith(("'", '"')):
+ value = value.strip("'\"")
+
+ # actually use the data
+ if next_string == 'plainexamples':
+                    # keep it as a string; it can be yaml, but we let the caller deal with it
+ data[next_string] = to_text(value)
+ else:
+ # yaml load the data
+ try:
+ data[next_string] = AnsibleLoader(value, file_name=filename).get_single_data()
+ except Exception as e:
+ msg = "Unable to parse docs '%s' in python file '%s': %s" % (_var2string(next_string), filename, to_native(e))
+ if not ignore_errors:
+ raise AnsibleParserError(msg, orig_exc=e)
+ elif verbose:
+ display.error(msg)
+
+ next_string = None
+
+ # if nothing else worked, fall back to old method
+ if not seen:
+ data = read_docstring_from_python_file(filename, verbose, ignore_errors)
+
+ return data
+
+
+def read_docstring_from_python_file(filename, verbose=True, ignore_errors=True):
+ """
+ Use ast to search for assignment of the DOCUMENTATION and EXAMPLES variables in the given file.
+ Parse DOCUMENTATION from YAML and return the YAML doc or None together with EXAMPLES, as plain text.
+ """
+
+ data = _init_doc_dict()
+
+ try:
+ with open(filename, 'rb') as b_module_data:
+ M = ast.parse(b_module_data.read())
+
+ for child in M.body:
+ if isinstance(child, ast.Assign):
+ for t in child.targets:
+ try:
+ theid = t.id
+ except AttributeError:
+                        # skip targets without an 'id' attribute (e.g. tuple or attribute assignments)
+ display.warning("Building documentation, failed to assign id for %s on %s, skipping" % (t, filename))
+ continue
+
+ if theid in string_to_vars:
+ varkey = string_to_vars[theid]
+ if isinstance(child.value, ast.Dict):
+ data[varkey] = ast.literal_eval(child.value)
+ else:
+ if theid == 'EXAMPLES':
+                            # examples 'can' be yaml, but even if so, we don't want to parse as such here
+ # as it can create undesired 'objects' that don't display well as docs.
+ data[varkey] = to_text(child.value.s)
+ else:
+                                # the string should be YAML if it is not already a dict
+ data[varkey] = AnsibleLoader(child.value.s, file_name=filename).get_single_data()
+
+ display.debug('Documentation assigned: %s' % varkey)
+
+ except Exception as e:
+ msg = "Unable to parse documentation in python file '%s': %s" % (filename, to_native(e))
+ if not ignore_errors:
+ raise AnsibleParserError(msg, orig_exc=e)
+ elif verbose:
+ display.error(msg)
+
+ return data
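+# The resulting dict maps each documentation variable to its parsed value; the
+# shape below is illustrative for a typical module file:
+#
+#   {'doc': {...}, 'plainexamples': '- name: ...', 'returndocs': {...}, 'metadata': None}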
+
+
+def read_docstring(filename, verbose=True, ignore_errors=True):
+ ''' returns a documentation dictionary from Ansible plugin docstrings '''
+
+ # NOTE: adjacency of doc file to code file is responsibility of caller
+ if filename.endswith(C.YAML_DOC_EXTENSIONS):
+ docstring = read_docstring_from_yaml_file(filename, verbose=verbose, ignore_errors=ignore_errors)
+ elif filename.endswith(C.PYTHON_DOC_EXTENSIONS):
+ docstring = read_docstring_from_python_module(filename, verbose=verbose, ignore_errors=ignore_errors)
+ elif not ignore_errors:
+ raise AnsibleError("Unknown documentation format: %s" % to_native(filename))
+
+ if not docstring and not ignore_errors:
+ raise AnsibleError("Unable to parse documentation for: %s" % to_native(filename))
+
+    # because seealso is specially processed from 'doc' later on
+ # TODO: stop any other 'overloaded' implementation in main doc
+ docstring['seealso'] = None
+
+ return docstring
+
+
+def read_docstub(filename):
+ """
+ Quickly find short_description using string methods instead of node parsing.
+ This does not return a full set of documentation strings and is intended for
+ operations like ansible-doc -l.
+ """
+
+ in_documentation = False
+ capturing = False
+ indent_detection = ''
+ doc_stub = []
+
+ with open(filename, 'r') as t_module_data:
+ for line in t_module_data:
+ if in_documentation:
+ # start capturing the stub until indentation returns
+ if capturing and line.startswith(indent_detection):
+ doc_stub.append(line)
+
+ elif capturing and not line.startswith(indent_detection):
+ break
+
+ elif line.lstrip().startswith('short_description:'):
+ capturing = True
+ # Detect that the short_description continues on the next line if it's indented more
+ # than short_description itself.
+ indent_detection = ' ' * (len(line) - len(line.lstrip()) + 1)
+ doc_stub.append(line)
+
+ elif line.startswith('DOCUMENTATION') and ('=' in line or ':' in line):
+ in_documentation = True
+
+    short_description = ''.join(doc_stub).strip().rstrip('.')
+ data = AnsibleLoader(short_description, file_name=filename).get_single_data()
+
+ return data
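+# Illustrative sketch (file contents assumed): for a module whose DOCUMENTATION
+# block contains 'short_description: Manage widgets', read_docstub returns
+# {'short_description': 'Manage widgets'}.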
diff --git a/lib/ansible/parsing/quoting.py b/lib/ansible/parsing/quoting.py
new file mode 100644
index 0000000..d3a38d9
--- /dev/null
+++ b/lib/ansible/parsing/quoting.py
@@ -0,0 +1,31 @@
+# (c) 2014 James Cammarata, <jcammarata@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def is_quoted(data):
+ return len(data) > 1 and data[0] == data[-1] and data[0] in ('"', "'") and data[-2] != '\\'
+
+
+def unquote(data):
+ ''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
+ if is_quoted(data):
+ return data[1:-1]
+ return data
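+# Doctest-style examples of the behavior above:
+#
+#   >>> unquote('"hello"')
+#   'hello'
+#   >>> unquote("'unterminated")   # not quoted on both ends, returned unchanged
+#   "'unterminated"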
diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py
new file mode 100644
index 0000000..b68444f
--- /dev/null
+++ b/lib/ansible/parsing/splitter.py
@@ -0,0 +1,286 @@
+# (c) 2014 James Cammarata, <jcammarata@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import codecs
+import re
+
+from ansible.errors import AnsibleParserError
+from ansible.module_utils._text import to_text
+from ansible.parsing.quoting import unquote
+
+
+# Decode escapes adapted from rspeer's answer here:
+# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
+_HEXCHAR = '[a-fA-F0-9]'
+_ESCAPE_SEQUENCE_RE = re.compile(r'''
+ ( \\U{0} # 8-digit hex escapes
+ | \\u{1} # 4-digit hex escapes
+ | \\x{2} # 2-digit hex escapes
+ | \\N\{{[^}}]+\}} # Unicode characters by name
+ | \\[\\'"abfnrtv] # Single-character escapes
+ )'''.format(_HEXCHAR * 8, _HEXCHAR * 4, _HEXCHAR * 2), re.UNICODE | re.VERBOSE)
+
+
+def _decode_escapes(s):
+ def decode_match(match):
+ return codecs.decode(match.group(0), 'unicode-escape')
+
+ return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
+
+
+def parse_kv(args, check_raw=False):
+ '''
+    Convert a string of key/value items to a dict. Tokens without a '=' are
+    treated as free-form params and collected into a parameter called
+    '_raw_params'. When check_raw is True, key=value tokens whose key is not
+    one of the known shell/command options are also treated as free-form.
+ '''
+
+ args = to_text(args, nonstring='passthru')
+
+ options = {}
+ if args is not None:
+ try:
+ vargs = split_args(args)
+ except IndexError as e:
+ raise AnsibleParserError("Unable to parse argument string", orig_exc=e)
+ except ValueError as ve:
+ if 'no closing quotation' in str(ve).lower():
+ raise AnsibleParserError("error parsing argument string, try quoting the entire line.", orig_exc=ve)
+ else:
+ raise
+
+ raw_params = []
+ for orig_x in vargs:
+ x = _decode_escapes(orig_x)
+ if "=" in x:
+ pos = 0
+ try:
+ while True:
+ pos = x.index('=', pos + 1)
+ if pos > 0 and x[pos - 1] != '\\':
+ break
+ except ValueError:
+ # ran out of string, but we must have some escaped equals,
+ # so replace those and append this to the list of raw params
+ raw_params.append(x.replace('\\=', '='))
+ continue
+
+ k = x[:pos]
+ v = x[pos + 1:]
+
+ # FIXME: make the retrieval of this list of shell/command options a function, so the list is centralized
+ if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn', 'stdin', 'stdin_add_newline', 'strip_empty_ends'):
+ raw_params.append(orig_x)
+ else:
+ options[k.strip()] = unquote(v.strip())
+ else:
+ raw_params.append(orig_x)
+
+ # recombine the free-form params, if any were found, and assign
+ # them to a special option for use later by the shell/command module
+ if len(raw_params) > 0:
+ options[u'_raw_params'] = join_args(raw_params)
+
+ return options
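+# Doctest-style examples (outputs follow from the logic above):
+#
+#   >>> parse_kv(u'src=a dest=b')
+#   {'src': 'a', 'dest': 'b'}
+#   >>> parse_kv(u'echo hello creates=/tmp/done', check_raw=True)
+#   {'creates': '/tmp/done', '_raw_params': 'echo hello'}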
+
+
+def _get_quote_state(token, quote_char):
+ '''
+    determine whether a token leaves a quoted string unterminated,
+    in which case subsequent tokens need to be joined back onto it
+ '''
+ # the char before the current one, used to see if
+ # the current character is escaped
+ prev_char = None
+ for idx, cur_char in enumerate(token):
+ if idx > 0:
+ prev_char = token[idx - 1]
+ if cur_char in '"\'' and prev_char != '\\':
+ if quote_char:
+ if cur_char == quote_char:
+ quote_char = None
+ else:
+ quote_char = cur_char
+ return quote_char
+
+
+def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
+ '''
+ this function counts the number of opening/closing blocks for a
+ given opening/closing type and adjusts the current depth for that
+ block based on the difference
+ '''
+ num_open = token.count(open_token)
+ num_close = token.count(close_token)
+ if num_open != num_close:
+ cur_depth += (num_open - num_close)
+ if cur_depth < 0:
+ cur_depth = 0
+ return cur_depth
+
+
+def join_args(s):
+ '''
+ Join the original cmd based on manipulations by split_args().
+ This retains the original newlines and whitespaces.
+ '''
+ result = ''
+ for p in s:
+ if len(result) == 0 or result.endswith('\n'):
+ result += p
+ else:
+ result += ' ' + p
+ return result
+
+
+def split_args(args):
+ '''
+ Splits args on whitespace, but intelligently reassembles
+ those that may have been split over a jinja2 block or quotes.
+
+    When used in a remote module, we won't ever have to be concerned about
+    jinja2 blocks; however, this function is/will be used in the
+    core portions as well, before the args are templated.
+
+ example input: a=b c="foo bar"
+ example output: ['a=b', 'c="foo bar"']
+
+    Basically this is a variation of shlex that has some more intelligence for
+ how Ansible needs to use it.
+ '''
+
+ # the list of params parsed out of the arg string
+ # this is going to be the result value when we are done
+ params = []
+
+ # Initial split on newlines
+ items = args.split('\n')
+
+ # iterate over the tokens, and reassemble any that may have been
+ # split on a space inside a jinja2 block.
+    # e.g. if the tokens are "{{", "foo", "}}", these go together
+
+ # These variables are used
+ # to keep track of the state of the parsing, since blocks and quotes
+ # may be nested within each other.
+
+ quote_char = None
+ inside_quotes = False
+ print_depth = 0 # used to count nested jinja2 {{ }} blocks
+ block_depth = 0 # used to count nested jinja2 {% %} blocks
+ comment_depth = 0 # used to count nested jinja2 {# #} blocks
+
+ # now we loop over each split chunk, coalescing tokens if the white space
+ # split occurred within quotes or a jinja2 block of some kind
+ for (itemidx, item) in enumerate(items):
+
+ # we split on spaces and newlines separately, so that we
+ # can tell which character we split on for reassembly
+ # inside quotation characters
+ tokens = item.split(' ')
+
+ line_continuation = False
+ for (idx, token) in enumerate(tokens):
+
+            # Empty entries mean we had consecutive spaces
+ # We want to hold onto them so we can reconstruct them later
+ if len(token) == 0 and idx != 0:
+ params[-1] += ' '
+ continue
+
+ # if we hit a line continuation character, but
+ # we're not inside quotes, ignore it and continue
+ # on to the next token while setting a flag
+ if token == '\\' and not inside_quotes:
+ line_continuation = True
+ continue
+
+ # store the previous quoting state for checking later
+ was_inside_quotes = inside_quotes
+ quote_char = _get_quote_state(token, quote_char)
+ inside_quotes = quote_char is not None
+
+ # multiple conditions may append a token to the list of params,
+ # so we keep track with this flag to make sure it only happens once
+ # append means add to the end of the list, don't append means concatenate
+ # it to the end of the last token
+ appended = False
+
+ # if we're inside quotes now, but weren't before, append the token
+ # to the end of the list, since we'll tack on more to it later
+ # otherwise, if we're inside any jinja2 block, inside quotes, or we were
+ # inside quotes (but aren't now) concat this token to the last param
+ if inside_quotes and not was_inside_quotes and not (print_depth or block_depth or comment_depth):
+ params.append(token)
+ appended = True
+ elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
+ if idx == 0 and was_inside_quotes:
+ params[-1] = "%s%s" % (params[-1], token)
+ elif len(tokens) > 1:
+ spacer = ''
+ if idx > 0:
+ spacer = ' '
+ params[-1] = "%s%s%s" % (params[-1], spacer, token)
+ else:
+ params[-1] = "%s\n%s" % (params[-1], token)
+ appended = True
+
+ # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
+ # and may append the current token to the params (if we haven't previously done so)
+ prev_print_depth = print_depth
+ print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
+ if print_depth != prev_print_depth and not appended:
+ params.append(token)
+ appended = True
+
+ prev_block_depth = block_depth
+ block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
+ if block_depth != prev_block_depth and not appended:
+ params.append(token)
+ appended = True
+
+ prev_comment_depth = comment_depth
+ comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
+ if comment_depth != prev_comment_depth and not appended:
+ params.append(token)
+ appended = True
+
+ # finally, if we're at zero depth for all blocks and not inside quotes, and have not
+ # yet appended anything to the list of params, we do so now
+ if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
+ params.append(token)
+
+ # if this was the last token in the list, and we have more than
+ # one item (meaning we split on newlines), add a newline back here
+ # to preserve the original structure
+ if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
+ params[-1] += '\n'
+
+ # always clear the line continuation flag
+ line_continuation = False
+
+ # If we're done and things are not at zero depth or we're still inside quotes,
+ # raise an error to indicate that the args were unbalanced
+ if print_depth or block_depth or comment_depth or inside_quotes:
+ raise AnsibleParserError(u"failed at splitting arguments, either an unbalanced jinja2 block or quotes: {0}".format(args))
+
+ return params
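+# Doctest-style example:
+#
+#   >>> split_args(u'a=b c="foo bar" d={{ var }}')
+#   ['a=b', 'c="foo bar"', 'd={{ var }}']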
diff --git a/lib/ansible/parsing/utils/__init__.py b/lib/ansible/parsing/utils/__init__.py
new file mode 100644
index 0000000..ae8ccff
--- /dev/null
+++ b/lib/ansible/parsing/utils/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/parsing/utils/addresses.py b/lib/ansible/parsing/utils/addresses.py
new file mode 100644
index 0000000..0096af4
--- /dev/null
+++ b/lib/ansible/parsing/utils/addresses.py
@@ -0,0 +1,216 @@
+# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from ansible.errors import AnsibleParserError, AnsibleError
+
+# Components that match a numeric or alphanumeric begin:end or begin:end:step
+# range expression inside square brackets.
+
+numeric_range = r'''
+ \[
+ (?:[0-9]+:[0-9]+) # numeric begin:end
+ (?::[0-9]+)? # numeric :step (optional)
+ \]
+'''
+
+hexadecimal_range = r'''
+ \[
+ (?:[0-9a-f]+:[0-9a-f]+) # hexadecimal begin:end
+ (?::[0-9]+)? # numeric :step (optional)
+ \]
+'''
+
+alphanumeric_range = r'''
+ \[
+ (?:
+ [a-z]:[a-z]| # one-char alphabetic range
+ [0-9]+:[0-9]+ # ...or a numeric one
+ )
+ (?::[0-9]+)? # numeric :step (optional)
+ \]
+'''
+
+# Components that match a 16-bit portion of an IPv6 address in hexadecimal
+# notation (0..ffff) or an 8-bit portion of an IPv4 address in decimal notation
+# (0..255) or an [x:y(:z)] numeric range.
+
+ipv6_component = r'''
+ (?:
+ [0-9a-f]{{1,4}}| # 0..ffff
+ {range} # or a numeric range
+ )
+'''.format(range=hexadecimal_range)
+
+ipv4_component = r'''
+ (?:
+ [01]?[0-9]{{1,2}}| # 0..199
+ 2[0-4][0-9]| # 200..249
+ 25[0-5]| # 250..255
+ {range} # or a numeric range
+ )
+'''.format(range=numeric_range)
+
+# A hostname label, e.g. 'foo' in 'foo.example.com'. Consists of alphanumeric
+# characters plus dashes (and underscores) or valid ranges. The label may not
+# start or end with a hyphen or an underscore. This is interpolated into the
+# hostname pattern below. We don't try to enforce the 63-char length limit.
+
+label = r'''
+ (?:[\w]|{range}) # Starts with an alphanumeric or a range
+ (?:[\w_-]|{range})* # Then zero or more of the same or [_-]
+ (?<![_-]) # ...as long as it didn't end with [_-]
+'''.format(range=alphanumeric_range)
+
+patterns = {
+ # This matches a square-bracketed expression with a port specification. What
+ # is inside the square brackets is validated later.
+
+ 'bracketed_hostport': re.compile(
+ r'''^
+ \[(.+)\] # [host identifier]
+ :([0-9]+) # :port number
+ $
+ ''', re.X
+ ),
+
+ # This matches a bare IPv4 address or hostname (or host pattern including
+ # [x:y(:z)] ranges) with a port specification.
+
+ 'hostport': re.compile(
+ r'''^
+ ((?: # We want to match:
+ [^:\[\]] # (a non-range character
+ | # ...or...
+ \[[^\]]*\] # a complete bracketed expression)
+ )*) # repeated as many times as possible
+ :([0-9]+) # followed by a port number
+ $
+ ''', re.X
+ ),
+
+ # This matches an IPv4 address, but also permits range expressions.
+
+ 'ipv4': re.compile(
+ r'''^
+ (?:{i4}\.){{3}}{i4} # Three parts followed by dots plus one
+ $
+ '''.format(i4=ipv4_component), re.X | re.I
+ ),
+
+ # This matches an IPv6 address, but also permits range expressions.
+ #
+ # This expression looks complex, but it really only spells out the various
+ # combinations in which the basic unit of an IPv6 address (0..ffff) can be
+ # written, from :: to 1:2:3:4:5:6:7:8, plus the IPv4-in-IPv6 variants such
+ # as ::ffff:192.0.2.3.
+ #
+ # Note that we can't just use ipaddress.ip_address() because we also have to
+ # accept ranges in place of each component.
+
+ 'ipv6': re.compile(
+ r'''^
+ (?:{0}:){{7}}{0}| # uncompressed: 1:2:3:4:5:6:7:8
+ (?:{0}:){{1,6}}:| # compressed variants, which are all
+ (?:{0}:)(?::{0}){{1,6}}| # a::b for various lengths of a,b
+ (?:{0}:){{2}}(?::{0}){{1,5}}|
+ (?:{0}:){{3}}(?::{0}){{1,4}}|
+ (?:{0}:){{4}}(?::{0}){{1,3}}|
+ (?:{0}:){{5}}(?::{0}){{1,2}}|
+ (?:{0}:){{6}}(?::{0})| # ...all with 2 <= a+b <= 7
+ :(?::{0}){{1,6}}| # ::ffff(:ffff...)
+ {0}?::| # ffff::, ::
+ # ipv4-in-ipv6 variants
+ (?:0:){{6}}(?:{0}\.){{3}}{0}|
+ ::(?:ffff:)?(?:{0}\.){{3}}{0}|
+ (?:0:){{5}}ffff:(?:{0}\.){{3}}{0}
+ $
+ '''.format(ipv6_component), re.X | re.I
+ ),
+
+ # This matches a hostname or host pattern including [x:y(:z)] ranges.
+ #
+ # We roughly follow DNS rules here, but also allow ranges (and underscores).
+ # In the past, no systematic rules were enforced about inventory hostnames,
+ # but the parsing context (e.g. shlex.split(), fnmatch.fnmatch()) excluded
+ # various metacharacters anyway.
+ #
+ # We don't enforce DNS length restrictions here (63 characters per label,
+ # 253 characters total) or make any attempt to process IDNs.
+
+ 'hostname': re.compile(
+ r'''^
+ {label} # We must have at least one label
+ (?:\.{label})* # Followed by zero or more .labels
+ $
+ '''.format(label=label), re.X | re.I | re.UNICODE
+ ),
+
+}
+
+
+def parse_address(address, allow_ranges=False):
+ """
+ Takes a string and returns a (host, port) tuple. If the host is None, then
+ the string could not be parsed as a host identifier with an optional port
+ specification. If the port is None, then no port was specified.
+
+ The host identifier may be a hostname (qualified or not), an IPv4 address,
+ or an IPv6 address. If allow_ranges is True, then any of those may contain
+    [x:y] range specifications, e.g. foo[1:3] or foo[0:5]-bar[x:z].
+
+ The port number is an optional :NN suffix on an IPv4 address or host name,
+ or a mandatory :NN suffix on any square-bracketed expression: IPv6 address,
+ IPv4 address, or host name. (This means the only way to specify a port for
+ an IPv6 address is to enclose it in square brackets.)
+ """
+
+ # First, we extract the port number if one is specified.
+
+ port = None
+ for matching in ['bracketed_hostport', 'hostport']:
+ m = patterns[matching].match(address)
+ if m:
+ (address, port) = m.groups()
+ port = int(port)
+ continue
+
+ # What we're left with now must be an IPv4 or IPv6 address, possibly with
+ # numeric ranges, or a hostname with alphanumeric ranges.
+
+ host = None
+ for matching in ['ipv4', 'ipv6', 'hostname']:
+ m = patterns[matching].match(address)
+ if m:
+ host = address
+ continue
+
+ # If it isn't any of the above, we don't understand it.
+ if not host:
+ raise AnsibleError("Not a valid network hostname: %s" % address)
+
+ # If we get to this point, we know that any included ranges are valid.
+ # If the caller is prepared to handle them, all is well.
+ # Otherwise we treat it as a parse failure.
+ if not allow_ranges and '[' in host:
+ raise AnsibleParserError("Detected range in host but was asked to ignore ranges")
+
+ return (host, port)
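+# Doctest-style examples:
+#
+#   >>> parse_address('192.0.2.1:22')
+#   ('192.0.2.1', 22)
+#   >>> parse_address('web[1:3].example.com', allow_ranges=True)
+#   ('web[1:3].example.com', None)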
diff --git a/lib/ansible/parsing/utils/jsonify.py b/lib/ansible/parsing/utils/jsonify.py
new file mode 100644
index 0000000..19ebc56
--- /dev/null
+++ b/lib/ansible/parsing/utils/jsonify.py
@@ -0,0 +1,38 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def jsonify(result, format=False):
+    ''' format JSON output (compressed or uncompressed) '''
+
+ if result is None:
+ return "{}"
+
+ indent = None
+ if format:
+ indent = 4
+
+ try:
+ return json.dumps(result, sort_keys=True, indent=indent, ensure_ascii=False)
+ except UnicodeDecodeError:
+ return json.dumps(result, sort_keys=True, indent=indent)
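+# Doctest-style example:
+#
+#   >>> jsonify({'b': 1, 'a': 2}, format=True)
+#   '{\n    "a": 2,\n    "b": 1\n}'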
diff --git a/lib/ansible/parsing/utils/yaml.py b/lib/ansible/parsing/utils/yaml.py
new file mode 100644
index 0000000..91e37f9
--- /dev/null
+++ b/lib/ansible/parsing/utils/yaml.py
@@ -0,0 +1,84 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from yaml import YAMLError
+
+from ansible.errors import AnsibleParserError
+from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
+from ansible.module_utils._text import to_native
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+from ansible.parsing.ajson import AnsibleJSONDecoder
+
+
+__all__ = ('from_yaml',)
+
+
+def _handle_error(json_exc, yaml_exc, file_name, show_content):
+ '''
+ Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
+ file name/position where a YAML exception occurred, and raises an AnsibleParserError
+ to display the syntax exception information.
+ '''
+
+ # if the YAML exception contains a problem mark, use it to construct
+ # an object the error class can use to display the faulty line
+ err_obj = None
+ if hasattr(yaml_exc, 'problem_mark'):
+ err_obj = AnsibleBaseYAMLObject()
+ err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
+
+ n_yaml_syntax_error = YAML_SYNTAX_ERROR % to_native(getattr(yaml_exc, 'problem', u''))
+    n_err_msg = 'We were unable to read the input as either JSON or YAML, these are the errors we got from each:\n' \
+ 'JSON: %s\n\n%s' % (to_native(json_exc), n_yaml_syntax_error)
+
+ raise AnsibleParserError(n_err_msg, obj=err_obj, show_content=show_content, orig_exc=yaml_exc)
+
+
+def _safe_load(stream, file_name=None, vault_secrets=None):
+ ''' Implements yaml.safe_load(), except using our custom loader class. '''
+
+ loader = AnsibleLoader(stream, file_name, vault_secrets)
+ try:
+ return loader.get_single_data()
+ finally:
+ try:
+ loader.dispose()
+ except AttributeError:
+ pass # older versions of yaml don't have dispose function, ignore
+
+
+def from_yaml(data, file_name='<string>', show_content=True, vault_secrets=None, json_only=False):
+ '''
+ Creates a python datastructure from the given data, which can be either
+ a JSON or YAML string.
+ '''
+ new_data = None
+
+ try:
+ # in case we have to deal with vaults
+ AnsibleJSONDecoder.set_secrets(vault_secrets)
+
+ # we first try to load this data as JSON.
+ # Fixes issues with extra vars json strings not being parsed correctly by the yaml parser
+ new_data = json.loads(data, cls=AnsibleJSONDecoder)
+ except Exception as json_exc:
+
+ if json_only:
+ raise AnsibleParserError(to_native(json_exc), orig_exc=json_exc)
+
+ # must not be JSON, let the rest try
+ try:
+ new_data = _safe_load(data, file_name=file_name, vault_secrets=vault_secrets)
+ except YAMLError as yaml_exc:
+ _handle_error(json_exc, yaml_exc, file_name, show_content)
+
+ return new_data
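+# Doctest-style sketch (no vault secrets involved; YAML results use Ansible's
+# dict/str subclasses, which print like the plain types shown here):
+#
+#   >>> from_yaml('{"a": 1}')   # valid JSON goes through the JSON decoder first
+#   {'a': 1}
+#   >>> from_yaml('a: 1')       # anything else falls back to YAML
+#   {'a': 1}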
diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py
new file mode 100644
index 0000000..8ac22d4
--- /dev/null
+++ b/lib/ansible/parsing/vault/__init__.py
@@ -0,0 +1,1289 @@
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+# (c) 2016, Adrian Likins <alikins@redhat.com>
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import fcntl
+import os
+import random
+import shlex
+import shutil
+import subprocess
+import sys
+import tempfile
+import warnings
+
+from binascii import hexlify
+from binascii import unhexlify
+from binascii import Error as BinasciiError
+
+HAS_CRYPTOGRAPHY = False
+CRYPTOGRAPHY_BACKEND = None
+try:
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", DeprecationWarning)
+ from cryptography.exceptions import InvalidSignature
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import hashes, padding
+ from cryptography.hazmat.primitives.hmac import HMAC
+ from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
+ from cryptography.hazmat.primitives.ciphers import (
+ Cipher as C_Cipher, algorithms, modes
+ )
+ CRYPTOGRAPHY_BACKEND = default_backend()
+ HAS_CRYPTOGRAPHY = True
+except ImportError:
+ pass
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible import constants as C
+from ansible.module_utils.six import binary_type
+from ansible.module_utils._text import to_bytes, to_text, to_native
+from ansible.utils.display import Display
+from ansible.utils.path import makedirs_safe, unfrackpath
+
+display = Display()
+
+
+b_HEADER = b'$ANSIBLE_VAULT'
+CIPHER_WHITELIST = frozenset((u'AES256',))
+CIPHER_WRITE_WHITELIST = frozenset((u'AES256',))
+# See also CIPHER_MAPPING at the bottom of the file which maps cipher strings
+# (used in VaultFile header) to a cipher class
+
+NEED_CRYPTO_LIBRARY = "ansible-vault requires the cryptography library in order to function"
+
+
+class AnsibleVaultError(AnsibleError):
+ pass
+
+
+class AnsibleVaultPasswordError(AnsibleVaultError):
+ pass
+
+
+class AnsibleVaultFormatError(AnsibleError):
+ pass
+
+
+def is_encrypted(data):
+ """ Test if this is vault encrypted data blob
+
+ :arg data: a byte or text string to test whether it is recognized as vault
+ encrypted data
+ :returns: True if it is recognized. Otherwise, False.
+ """
+ try:
+ # Make sure we have a byte string and that it only contains ascii
+ # bytes.
+ b_data = to_bytes(to_text(data, encoding='ascii', errors='strict', nonstring='strict'), encoding='ascii', errors='strict')
+ except (UnicodeError, TypeError):
+ # The vault format is pure ascii so if we failed to encode to bytes
+ # via ascii we know that this is not vault data.
+ # Similarly, if it's not a string, it's not vault data
+ return False
+
+ if b_data.startswith(b_HEADER):
+ return True
+ return False
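+# Doctest-style examples:
+#
+#   >>> is_encrypted(b'$ANSIBLE_VAULT;1.1;AES256\n3635')
+#   True
+#   >>> is_encrypted(u'plain text')
+#   False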
+
+
+def is_encrypted_file(file_obj, start_pos=0, count=-1):
+ """Test if the contents of a file obj are a vault encrypted data blob.
+
+ :arg file_obj: A file object that will be read from.
+ :kwarg start_pos: A byte offset in the file to start reading the header
+ from. Defaults to 0, the beginning of the file.
+ :kwarg count: Read up to this number of bytes from the file to determine
+ if it looks like encrypted vault data. The default is -1, read to the
+ end of file.
+ :returns: True if the file looks like a vault file. Otherwise, False.
+ """
+ # read the header and reset the file stream to where it started
+ current_position = file_obj.tell()
+ try:
+ file_obj.seek(start_pos)
+ return is_encrypted(file_obj.read(count))
+
+ finally:
+ file_obj.seek(current_position)
+
+
+def _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None):
+
+ b_tmpdata = b_vaulttext_envelope.splitlines()
+ b_tmpheader = b_tmpdata[0].strip().split(b';')
+
+ b_version = b_tmpheader[1].strip()
+ cipher_name = to_text(b_tmpheader[2].strip())
+ vault_id = default_vault_id
+
+ # Only attempt to find vault_id if the vault file is version 1.2 or newer
+ # if self.b_version == b'1.2':
+ if len(b_tmpheader) >= 4:
+ vault_id = to_text(b_tmpheader[3].strip())
+
+ b_ciphertext = b''.join(b_tmpdata[1:])
+
+ return b_ciphertext, b_version, cipher_name, vault_id
+
+
+def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None, filename=None):
+ """Parse the vaulttext envelope
+
+ When data is saved, it has a header prepended and is formatted into 80
+ character lines. This method extracts the information from the header
+ and then removes the header and the inserted newlines. The string returned
+ is suitable for processing by the Cipher classes.
+
+    :arg b_vaulttext_envelope: byte str containing the data from a save file
+ :kwarg default_vault_id: The vault_id name to use if the vaulttext does not provide one.
+ :kwarg filename: The filename that the data came from. This is only
+ used to make better error messages in case the data cannot be
+ decrypted. This is optional.
+    :returns: A tuple of byte str of the vaulttext suitable to pass to parse_vaulttext,
+ a byte str of the vault format version,
+ the name of the cipher used, and the vault_id.
+ :raises: AnsibleVaultFormatError: if the vaulttext_envelope format is invalid
+ """
+ # used by decrypt
+ default_vault_id = default_vault_id or C.DEFAULT_VAULT_IDENTITY
+
+ try:
+ return _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id)
+ except Exception as exc:
+ msg = "Vault envelope format error"
+ if filename:
+ msg += ' in %s' % (filename)
+ msg += ': %s' % exc
+ raise AnsibleVaultFormatError(msg)
+
+
+def format_vaulttext_envelope(b_ciphertext, cipher_name, version=None, vault_id=None):
+ """ Add header and format to 80 columns
+
+ :arg b_ciphertext: the encrypted and hexlified data as a byte string
+ :arg cipher_name: unicode cipher name (for ex, u'AES256')
+ :arg version: unicode vault version (for ex, '1.2'). Optional ('1.1' is default)
+ :arg vault_id: unicode vault identifier. If provided, the version will be bumped to 1.2.
+ :returns: a byte str that should be dumped into a file. It's
+ formatted to 80 char columns and has the header prepended
+ """
+
+ if not cipher_name:
+ raise AnsibleError("the cipher must be set before adding a header")
+
+ version = version or '1.1'
+
+ # If we specify a vault_id, use format version 1.2. For no vault_id, stick to 1.1
+ if vault_id and vault_id != u'default':
+ version = '1.2'
+
+ b_version = to_bytes(version, 'utf-8', errors='strict')
+ b_vault_id = to_bytes(vault_id, 'utf-8', errors='strict')
+ b_cipher_name = to_bytes(cipher_name, 'utf-8', errors='strict')
+
+ header_parts = [b_HEADER,
+ b_version,
+ b_cipher_name]
+
+ if b_version == b'1.2' and b_vault_id:
+ header_parts.append(b_vault_id)
+
+ header = b';'.join(header_parts)
+
+ b_vaulttext = [header]
+ b_vaulttext += [b_ciphertext[i:i + 80] for i in range(0, len(b_ciphertext), 80)]
+ b_vaulttext += [b'']
+ b_vaulttext = b'\n'.join(b_vaulttext)
+
+ return b_vaulttext
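+# Illustrative sketch of the resulting header line (ciphertext value assumed):
+#
+#   >>> format_vaulttext_envelope(b'3136' * 60, u'AES256', vault_id=u'dev').splitlines()[0]
+#   b'$ANSIBLE_VAULT;1.2;AES256;dev'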
+
+
+def _unhexlify(b_data):
+ try:
+ return unhexlify(b_data)
+ except (BinasciiError, TypeError) as exc:
+ raise AnsibleVaultFormatError('Vault format unhexlify error: %s' % exc)
+
+
+def _parse_vaulttext(b_vaulttext):
+ b_vaulttext = _unhexlify(b_vaulttext)
+ b_salt, b_crypted_hmac, b_ciphertext = b_vaulttext.split(b"\n", 2)
+ b_salt = _unhexlify(b_salt)
+ b_ciphertext = _unhexlify(b_ciphertext)
+
+ return b_ciphertext, b_salt, b_crypted_hmac
+
+
+def parse_vaulttext(b_vaulttext):
+ """Parse the vaulttext
+
+ :arg b_vaulttext: byte str containing the vaulttext (ciphertext, salt, crypted_hmac)
+ :returns: A tuple of byte str of the ciphertext suitable for passing to a
+ Cipher class's decrypt() function, a byte str of the salt,
+ and a byte str of the crypted_hmac
+ :raises: AnsibleVaultFormatError: if the vaulttext format is invalid
+ """
+ # SPLIT SALT, DIGEST, AND DATA
+ try:
+ return _parse_vaulttext(b_vaulttext)
+ except AnsibleVaultFormatError:
+ raise
+ except Exception as exc:
+ msg = "Vault vaulttext format error: %s" % exc
+ raise AnsibleVaultFormatError(msg)
+
+
+def verify_secret_is_not_empty(secret, msg=None):
+ '''Check the secret against minimal requirements.
+
+ Raises: AnsibleVaultPasswordError if the password does not meet requirements.
+
+    Currently, the only requirement is that the password is neither None nor an empty string.
+ '''
+ msg = msg or 'Invalid vault password was provided'
+ if not secret:
+ raise AnsibleVaultPasswordError(msg)
+
+
+class VaultSecret:
+    '''Opaque/abstract object for a single vault secret, i.e. a password or a key.'''
+
+ def __init__(self, _bytes=None):
+ # FIXME: ? that seems wrong... Unset etc?
+ self._bytes = _bytes
+
+ @property
+ def bytes(self):
+ '''The secret as a bytestring.
+
+        Subclasses that store text types will need to override this to encode the text to bytes.
+ '''
+ return self._bytes
+
+ def load(self):
+ return self._bytes
+
+
+class PromptVaultSecret(VaultSecret):
+ default_prompt_formats = ["Vault password (%s): "]
+
+ def __init__(self, _bytes=None, vault_id=None, prompt_formats=None):
+ super(PromptVaultSecret, self).__init__(_bytes=_bytes)
+ self.vault_id = vault_id
+
+ if prompt_formats is None:
+ self.prompt_formats = self.default_prompt_formats
+ else:
+ self.prompt_formats = prompt_formats
+
+ @property
+ def bytes(self):
+ return self._bytes
+
+ def load(self):
+ self._bytes = self.ask_vault_passwords()
+
+ def ask_vault_passwords(self):
+ b_vault_passwords = []
+
+ for prompt_format in self.prompt_formats:
+ prompt = prompt_format % {'vault_id': self.vault_id}
+ try:
+ vault_pass = display.prompt(prompt, private=True)
+ except EOFError:
+ raise AnsibleVaultError('EOFError (ctrl-d) on prompt for (%s)' % self.vault_id)
+
+ verify_secret_is_not_empty(vault_pass)
+
+ b_vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
+ b_vault_passwords.append(b_vault_pass)
+
+ # Make sure the passwords match by comparing them all to the first password
+ for b_vault_password in b_vault_passwords:
+ self.confirm(b_vault_passwords[0], b_vault_password)
+
+ if b_vault_passwords:
+ return b_vault_passwords[0]
+
+ return None
+
+ def confirm(self, b_vault_pass_1, b_vault_pass_2):
+ # enforce no newline chars at the end of passwords
+
+ if b_vault_pass_1 != b_vault_pass_2:
+ # FIXME: more specific exception
+ raise AnsibleError("Passwords do not match")
+
+
+def script_is_client(filename):
+ '''Determine if a vault secret script is a client script that can be given --vault-id args'''
+
+ # if password script is 'something-client' or 'something-client.[sh|py|rb|etc]'
+ # script_name can still have '.' or could be entire filename if there is no ext
+ script_name, dummy = os.path.splitext(filename)
+
+ # TODO: for now, this is entirely based on filename
+ if script_name.endswith('-client'):
+ return True
+
+ return False
+
+
+def get_file_vault_secret(filename=None, vault_id=None, encoding=None, loader=None):
+ ''' Get secret from file content or execute file and get secret from stdout '''
+
+    # we unfrack but do not follow the full path/context to the possible vault script,
+    # so that a script which uses an 'adjacent' file for configuration or similar
+    # still works (as inventory scripts often also do).
+    # While files from --vault-password-file are already unfracked, other sources are not.
+ this_path = unfrackpath(filename, follow=False)
+ if not os.path.exists(this_path):
+ raise AnsibleError("The vault password file %s was not found" % this_path)
+
+    # is it a script?
+ if loader.is_executable(this_path):
+
+ if script_is_client(filename):
+ # this is special script type that handles vault ids
+ display.vvvv(u'The vault password file %s is a client script.' % to_text(this_path))
+ # TODO: pass vault_id_name to script via cli
+ return ClientScriptVaultSecret(filename=this_path, vault_id=vault_id, encoding=encoding, loader=loader)
+
+ # just a plain vault password script. No args, returns a byte array
+ return ScriptVaultSecret(filename=this_path, encoding=encoding, loader=loader)
+
+ return FileVaultSecret(filename=this_path, encoding=encoding, loader=loader)
+
+
+# TODO: mv these classes to a separate file so we don't pollute vault with 'subprocess' etc
+class FileVaultSecret(VaultSecret):
+ def __init__(self, filename=None, encoding=None, loader=None):
+ super(FileVaultSecret, self).__init__()
+ self.filename = filename
+ self.loader = loader
+
+ self.encoding = encoding or 'utf8'
+
+ # We could load from file here, but that is eventually a pain to test
+ self._bytes = None
+ self._text = None
+
+ @property
+ def bytes(self):
+ if self._bytes:
+ return self._bytes
+ if self._text:
+ return self._text.encode(self.encoding)
+ return None
+
+ def load(self):
+ self._bytes = self._read_file(self.filename)
+
+ def _read_file(self, filename):
+ """
+ Read a vault password from a file or if executable, execute the script and
+ retrieve password from STDOUT
+ """
+
+ # TODO: replace with use of self.loader
+ try:
+ with open(filename, "rb") as f:
+ vault_pass = f.read().strip()
+ except (OSError, IOError) as e:
+ raise AnsibleError("Could not read vault password file %s: %s" % (filename, e))
+
+ b_vault_data, dummy = self.loader._decrypt_if_vault_data(vault_pass, filename)
+
+ vault_pass = b_vault_data.strip(b'\r\n')
+
+ verify_secret_is_not_empty(vault_pass,
+ msg='Invalid vault password was provided from file (%s)' % filename)
+
+ return vault_pass
+
+ def __repr__(self):
+ if self.filename:
+ return "%s(filename='%s')" % (self.__class__.__name__, self.filename)
+ return "%s()" % (self.__class__.__name__)
+
+
+class ScriptVaultSecret(FileVaultSecret):
+ def _read_file(self, filename):
+ if not self.loader.is_executable(filename):
+ raise AnsibleVaultError("The vault password script %s was not executable" % filename)
+
+ command = self._build_command()
+
+ stdout, stderr, p = self._run(command)
+
+ self._check_results(stdout, stderr, p)
+
+ vault_pass = stdout.strip(b'\r\n')
+
+ empty_password_msg = 'Invalid vault password was provided from script (%s)' % filename
+ verify_secret_is_not_empty(vault_pass, msg=empty_password_msg)
+
+ return vault_pass
+
+ def _run(self, command):
+ try:
+ # STDERR not captured to make it easier for users to prompt for input in their scripts
+ p = subprocess.Popen(command, stdout=subprocess.PIPE)
+ except OSError as e:
+ msg_format = "Problem running vault password script %s (%s)." \
+ " If this is not a script, remove the executable bit from the file."
+ msg = msg_format % (self.filename, e)
+
+ raise AnsibleError(msg)
+
+ stdout, stderr = p.communicate()
+ return stdout, stderr, p
+
+ def _check_results(self, stdout, stderr, popen):
+ if popen.returncode != 0:
+ raise AnsibleError("Vault password script %s returned non-zero (%s): %s" %
+ (self.filename, popen.returncode, stderr))
+
+ def _build_command(self):
+ return [self.filename]
+
+
+class ClientScriptVaultSecret(ScriptVaultSecret):
+ VAULT_ID_UNKNOWN_RC = 2
+
+ def __init__(self, filename=None, encoding=None, loader=None, vault_id=None):
+ super(ClientScriptVaultSecret, self).__init__(filename=filename,
+ encoding=encoding,
+ loader=loader)
+ self._vault_id = vault_id
+ display.vvvv(u'Executing vault password client script: %s --vault-id %s' % (to_text(filename), to_text(vault_id)))
+
+ def _run(self, command):
+ try:
+ p = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ except OSError as e:
+ msg_format = "Problem running vault password client script %s (%s)." \
+ " If this is not a script, remove the executable bit from the file."
+ msg = msg_format % (self.filename, e)
+
+ raise AnsibleError(msg)
+
+ stdout, stderr = p.communicate()
+ return stdout, stderr, p
+
+ def _check_results(self, stdout, stderr, popen):
+ if popen.returncode == self.VAULT_ID_UNKNOWN_RC:
+ raise AnsibleError('Vault password client script %s did not find a secret for vault-id=%s: %s' %
+ (self.filename, self._vault_id, stderr))
+
+ if popen.returncode != 0:
+ raise AnsibleError("Vault password client script %s returned non-zero (%s) when getting secret for vault-id=%s: %s" %
+ (self.filename, popen.returncode, self._vault_id, stderr))
+
+ def _build_command(self):
+ command = [self.filename]
+ if self._vault_id:
+ command.extend(['--vault-id', self._vault_id])
+
+ return command
+
+ def __repr__(self):
+ if self.filename:
+ return "%s(filename='%s', vault_id='%s')" % \
+ (self.__class__.__name__, self.filename, self._vault_id)
+ return "%s()" % (self.__class__.__name__)
+
+
+def match_secrets(secrets, target_vault_ids):
+ '''Find all VaultSecret objects that are mapped to any of the target_vault_ids in secrets'''
+ if not secrets:
+ return []
+
+ matches = [(vault_id, secret) for vault_id, secret in secrets if vault_id in target_vault_ids]
+ return matches
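+# Illustrative sketch ('dev_secret' and 'prod_secret' are hypothetical
+# VaultSecret objects):
+#
+#   secrets = [(u'dev', dev_secret), (u'prod', prod_secret)]
+#   match_secrets(secrets, [u'prod'])   # -> [(u'prod', prod_secret)]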
+
+
+def match_best_secret(secrets, target_vault_ids):
+ '''Find the best secret from secrets that matches target_vault_ids
+
+ Since secrets should be ordered so the early secrets are 'better' than later ones, this
+ just finds all the matches, then returns the first secret'''
+ matches = match_secrets(secrets, target_vault_ids)
+ if matches:
+ return matches[0]
+ # raise exception?
+ return None
+
+
+def match_encrypt_vault_id_secret(secrets, encrypt_vault_id=None):
+ # See if the --encrypt-vault-id matches a vault-id
+ display.vvvv(u'encrypt_vault_id=%s' % to_text(encrypt_vault_id))
+
+ if encrypt_vault_id is None:
+ raise AnsibleError('match_encrypt_vault_id_secret requires a non None encrypt_vault_id')
+
+ encrypt_vault_id_matchers = [encrypt_vault_id]
+ encrypt_secret = match_best_secret(secrets, encrypt_vault_id_matchers)
+
+ # return the best match for --encrypt-vault-id
+ if encrypt_secret:
+ return encrypt_secret
+
+    # If we specified an encrypt_vault_id and we couldn't find it, don't
+    # fall back to using the first/best secret
+ raise AnsibleVaultError('Did not find a match for --encrypt-vault-id=%s in the known vault-ids %s' % (encrypt_vault_id,
+ [_v for _v, _vs in secrets]))
+
+
+def match_encrypt_secret(secrets, encrypt_vault_id=None):
+ '''Find the best/first/only secret in secrets to use for encrypting'''
+
+ display.vvvv(u'encrypt_vault_id=%s' % to_text(encrypt_vault_id))
+ # See if the --encrypt-vault-id matches a vault-id
+ if encrypt_vault_id:
+ return match_encrypt_vault_id_secret(secrets,
+ encrypt_vault_id=encrypt_vault_id)
+
+    # Find the best/first secret from secrets since we didn't specify otherwise
+ # ie, consider all of the available secrets as matches
+ _vault_id_matchers = [_vault_id for _vault_id, dummy in secrets]
+ best_secret = match_best_secret(secrets, _vault_id_matchers)
+
+    # may be None if no secret matched
+ return best_secret
+
+
+class VaultLib:
+ def __init__(self, secrets=None):
+ self.secrets = secrets or []
+ self.cipher_name = None
+ self.b_version = b'1.2'
+
+ @staticmethod
+ def is_encrypted(vaulttext):
+ return is_encrypted(vaulttext)
+
+ def encrypt(self, plaintext, secret=None, vault_id=None, salt=None):
+ """Vault encrypt a piece of data.
+
+ :arg plaintext: a text or byte string to encrypt.
+ :returns: a utf-8 encoded byte str of encrypted data. The string
+ contains a header identifying this as vault encrypted data and
+ formatted to newline terminated lines of 80 characters. This is
+ suitable for dumping as is to a vault file.
+
+ If the string passed in is a text string, it will be encoded to UTF-8
+ before encryption.
+ """
+
+ if secret is None:
+ if self.secrets:
+ dummy, secret = match_encrypt_secret(self.secrets)
+ else:
+ raise AnsibleVaultError("A vault password must be specified to encrypt data")
+
+ b_plaintext = to_bytes(plaintext, errors='surrogate_or_strict')
+
+ if is_encrypted(b_plaintext):
+ raise AnsibleError("input is already encrypted")
+
+ if not self.cipher_name or self.cipher_name not in CIPHER_WRITE_WHITELIST:
+ self.cipher_name = u"AES256"
+
+ try:
+ this_cipher = CIPHER_MAPPING[self.cipher_name]()
+ except KeyError:
+ raise AnsibleError(u"{0} cipher could not be found".format(self.cipher_name))
+
+ # encrypt data
+ if vault_id:
+ display.vvvvv(u'Encrypting with vault_id "%s" and vault secret %s' % (to_text(vault_id), to_text(secret)))
+ else:
+ display.vvvvv(u'Encrypting without a vault_id using vault secret %s' % to_text(secret))
+
+ b_ciphertext = this_cipher.encrypt(b_plaintext, secret, salt)
+
+ # format the data for output to the file
+ b_vaulttext = format_vaulttext_envelope(b_ciphertext,
+ self.cipher_name,
+ vault_id=vault_id)
+ return b_vaulttext
+
+ def decrypt(self, vaulttext, filename=None, obj=None):
+ '''Decrypt a piece of vault encrypted data.
+
+ :arg vaulttext: a string to decrypt. Since vault encrypted data is an
+ ascii text format this can be either a byte str or unicode string.
+ :kwarg filename: a filename that the data came from. This is only
+ used to make better error messages in case the data cannot be
+ decrypted.
+        :returns: a byte string containing the decrypted data
+
+ '''
+ plaintext, vault_id, vault_secret = self.decrypt_and_get_vault_id(vaulttext, filename=filename, obj=obj)
+ return plaintext
+
+ def decrypt_and_get_vault_id(self, vaulttext, filename=None, obj=None):
+ """Decrypt a piece of vault encrypted data.
+
+ :arg vaulttext: a string to decrypt. Since vault encrypted data is an
+ ASCII text format, this can be either a byte str or unicode string.
+ :kwarg filename: a filename that the data came from. This is only
+ used to make better error messages in case the data cannot be
+ decrypted.
+ :returns: a tuple of the decrypted plaintext bytes, the vault-id that
+ matched, and the vault secret that was used
+
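+ A usage sketch (illustrative)::
+
+ b_plaintext, vault_id, vault_secret = vault.decrypt_and_get_vault_id(b_vaulttext)
+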
+ """
+ b_vaulttext = to_bytes(vaulttext, errors='strict', encoding='utf-8')
+
+ if self.secrets is None:
+ raise AnsibleVaultError("A vault password must be specified to decrypt data")
+
+ if not is_encrypted(b_vaulttext):
+ msg = "input is not vault encrypted data. "
+ if filename:
+ msg += "%s is not a vault encrypted file" % to_native(filename)
+ raise AnsibleError(msg)
+
+ b_vaulttext, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext, filename=filename)
+
+ # create the cipher object; note that the cipher used for decrypt can
+ # be different from the cipher used for encrypt
+ if cipher_name in CIPHER_WHITELIST:
+ this_cipher = CIPHER_MAPPING[cipher_name]()
+ else:
+ raise AnsibleError("{0} cipher could not be found".format(cipher_name))
+
+ b_plaintext = None
+
+ if not self.secrets:
+ raise AnsibleVaultError('Attempting to decrypt but no vault secrets found')
+
+ # WARNING: Currently, the vault id is not required to match the vault id in the vault blob to
+ # decrypt a vault properly. The vault id in the vault blob is not part of the encrypted
+ # or signed vault payload. There is no cryptographic checking/verification/validation of the
+ # vault blob's vault id, so it can be tampered with and changed. The vault id is just a
+ # nickname used to pick the best secret and provide some UX/UI info.
+
+ # iterate over all the applicable secrets (all of them by default) until one works...
+ # if the vaulttext specifies a vault_id, the corresponding vault secret is checked first,
+ # and (when DEFAULT_VAULT_ID_MATCH is set) it is the only one checked.
+
+ vault_id_matchers = []
+ vault_id_used = None
+ vault_secret_used = None
+
+ if vault_id:
+ display.vvvvv(u'Found a vault_id (%s) in the vaulttext' % to_text(vault_id))
+ vault_id_matchers.append(vault_id)
+ _matches = match_secrets(self.secrets, vault_id_matchers)
+ if _matches:
+ display.vvvvv(u'We have a secret associated with vault id (%s), will try to use it to decrypt %s' % (to_text(vault_id), to_text(filename)))
+ else:
+ display.vvvvv(u'Found a vault_id (%s) in the vault text, but we do not have an associated secret (--vault-id)' % to_text(vault_id))
+
+ # Not adding the other secrets to vault_id_matchers enforces a match between the vault_id from the vaulttext and
+ # the known vault secrets.
+ if not C.DEFAULT_VAULT_ID_MATCH:
+ # Add all of the known vault_ids as candidates for decrypting a vault.
+ vault_id_matchers.extend([_vault_id for _vault_id, _dummy in self.secrets if _vault_id != vault_id])
+
+ matched_secrets = match_secrets(self.secrets, vault_id_matchers)
+
+ for vault_secret_id, vault_secret in matched_secrets:
+ display.vvvvv(u'Trying to use vault secret=(%s) id=%s to decrypt %s' % (to_text(vault_secret), to_text(vault_secret_id), to_text(filename)))
+
+ try:
+ display.vvvv(u'Trying secret %s for vault_id=%s' % (to_text(vault_secret), to_text(vault_secret_id)))
+ b_plaintext = this_cipher.decrypt(b_vaulttext, vault_secret)
+ if b_plaintext is not None:
+ vault_id_used = vault_secret_id
+ vault_secret_used = vault_secret
+ file_slug = ''
+ if filename:
+ file_slug = ' of "%s"' % filename
+ display.vvvvv(
+ u'Decrypt%s successful with secret=%s and vault_id=%s' % (to_text(file_slug), to_text(vault_secret), to_text(vault_secret_id))
+ )
+ break
+ except AnsibleVaultFormatError as exc:
+ exc.obj = obj
+ msg = u"There was a vault format error"
+ if filename:
+ msg += u' in %s' % (to_text(filename))
+ msg += u': %s' % to_text(exc)
+ display.warning(msg, formatted=True)
+ raise
+ except AnsibleError as e:
+ display.vvvv(u'Tried to use the vault secret (%s) to decrypt (%s) but it failed. Error: %s' %
+ (to_text(vault_secret_id), to_text(filename), e))
+ continue
+ else:
+ msg = "Decryption failed (no vault secrets were found that could decrypt)"
+ if filename:
+ msg += " on %s" % to_native(filename)
+ raise AnsibleVaultError(msg)
+
+ if b_plaintext is None:
+ msg = "Decryption failed"
+ if filename:
+ msg += " on %s" % to_native(filename)
+ raise AnsibleError(msg)
+
+ return b_plaintext, vault_id_used, vault_secret_used
+
+
+class VaultEditor:
+
+ def __init__(self, vault=None):
+ # TODO: it may be more useful to just make VaultSecrets and index of VaultLib objects...
+ self.vault = vault or VaultLib()
+
+ # TODO: move the shred file stuff to its own class
+ def _shred_file_custom(self, tmp_path):
+ """"Destroy a file, when shred (core-utils) is not available
+
+ Unix `shred' destroys files "so that they can be recovered only with great difficulty with
+ specialised hardware, if at all". It is based on the method from the paper
+ "Secure Deletion of Data from Magnetic and Solid-State Memory",
+ Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996).
+
+ We do not go to that length to re-implement shred in Python; instead, overwriting with a block
+ of random data should suffice.
+
+ See https://github.com/ansible/ansible/pull/13700 .
+ """
+
+ file_len = os.path.getsize(tmp_path)
+
+ if file_len > 0: # avoid work when file was empty
+ max_chunk_len = min(1024 * 1024 * 2, file_len)
+
+ passes = 3
+ with open(tmp_path, "wb") as fh:
+ for _ in range(passes):
+ fh.seek(0, 0)
+ # get a random chunk of data, using a different length on each pass
+ chunk_len = random.randint(max_chunk_len // 2, max_chunk_len)
+ data = os.urandom(chunk_len)
+
+ for _ in range(0, file_len // chunk_len):
+ fh.write(data)
+ fh.write(data[:file_len % chunk_len])
+
+ # FIXME remove this assert once we have unittests to check its accuracy
+ if fh.tell() != file_len:
+ raise AnsibleAssertionError()
+
+ os.fsync(fh)
+
+ def _shred_file(self, tmp_path):
+ """Securely destroy a decrypted file
+
+ Note that the standard limitations of GNU shred apply (for flash storage, overwriting has
+ no effect due to wear leveling; for other storage systems, the async kernel->filesystem->disk
+ calls never guarantee the data hits the disk; etc.). Conversely, if your tmp dir is on
+ tmpfs (a ramdisk), this is a non-issue.
+
+ Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is
+ a good idea. If shred is not available (e.g. on Windows, or when coreutils is not installed), fall
+ back on a custom shredding method.
+ """
+
+ if not os.path.isfile(tmp_path):
+ # file is already gone
+ return
+
+ try:
+ r = subprocess.call(['shred', tmp_path])
+ except (OSError, ValueError):
+ # shred is not available on this system, or some other error occurred.
+ # ValueError caught because macOS El Capitan is raising an
+ # exception big enough to hit a limit in python2-2.7.11 and below.
+ # Symptom is ValueError: insecure pickle when shred is not
+ # installed there.
+ r = 1
+
+ if r != 0:
+ # we could not successfully execute unix shred; therefore, do custom shred.
+ self._shred_file_custom(tmp_path)
+
+ os.remove(tmp_path)
+
+ def _edit_file_helper(self, filename, secret, existing_data=None, force_save=False, vault_id=None):
+
+ # Create a tempfile
+ root, ext = os.path.splitext(os.path.realpath(filename))
+ fd, tmp_path = tempfile.mkstemp(suffix=ext, dir=C.DEFAULT_LOCAL_TMP)
+
+ cmd = self._editor_shell_command(tmp_path)
+ try:
+ if existing_data:
+ self.write_data(existing_data, fd, shred=False)
+ except Exception:
+ # if an error happens, destroy the decrypted file
+ self._shred_file(tmp_path)
+ raise
+ finally:
+ os.close(fd)
+
+ try:
+ # drop the user into an editor on the tmp file
+ subprocess.call(cmd)
+ except Exception as e:
+ # if an error happens, destroy the decrypted file
+ self._shred_file(tmp_path)
+ raise AnsibleError('Unable to execute the command "%s": %s' % (' '.join(cmd), to_native(e)))
+
+ b_tmpdata = self.read_data(tmp_path)
+
+ # Do nothing if the content has not changed
+ if force_save or existing_data != b_tmpdata:
+
+ # encrypt the new data and write it out to the tmp file
+ # (an existing vaultfile will always be UTF-8)
+ b_ciphertext = self.vault.encrypt(b_tmpdata, secret, vault_id=vault_id)
+ self.write_data(b_ciphertext, tmp_path)
+
+ # shuffle tmp file into place
+ self.shuffle_files(tmp_path, filename)
+ display.vvvvv(u'Saved edited file "%s" encrypted using %s and vault id "%s"' % (to_text(filename), to_text(secret), to_text(vault_id)))
+
+ # always shred the tmp file, just in case
+ self._shred_file(tmp_path)
+
+ def _real_path(self, filename):
+ # '-' is special to VaultEditor, don't expand it.
+ if filename == '-':
+ return filename
+
+ real_path = os.path.realpath(filename)
+ return real_path
+
+ def encrypt_bytes(self, b_plaintext, secret, vault_id=None):
+
+ b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id)
+
+ return b_ciphertext
+
+ def encrypt_file(self, filename, secret, vault_id=None, output_file=None):
+
+ # A file to be encrypted into a vaultfile could be any encoding
+ # so treat the contents as a byte string.
+
+ # follow the symlink
+ filename = self._real_path(filename)
+
+ b_plaintext = self.read_data(filename)
+ b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id)
+ self.write_data(b_ciphertext, output_file or filename)
+
+ def decrypt_file(self, filename, output_file=None):
+
+ # follow the symlink
+ filename = self._real_path(filename)
+
+ ciphertext = self.read_data(filename)
+
+ try:
+ plaintext = self.vault.decrypt(ciphertext, filename=filename)
+ except AnsibleError as e:
+ raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
+ self.write_data(plaintext, output_file or filename, shred=False)
+
+ def create_file(self, filename, secret, vault_id=None):
+ """ create a new encrypted file """
+
+ dirname = os.path.dirname(filename)
+ if dirname and not os.path.exists(dirname):
+ display.warning(u"%s does not exist, creating..." % to_text(dirname))
+ makedirs_safe(dirname)
+
+ # FIXME: If we can raise an error here, we can probably just make it
+ # behave like edit instead.
+ if os.path.isfile(filename):
+ raise AnsibleError("%s exists, please use 'edit' instead" % filename)
+
+ self._edit_file_helper(filename, secret, vault_id=vault_id)
+
+ def edit_file(self, filename):
+ vault_id_used = None
+ vault_secret_used = None
+ # follow the symlink
+ filename = self._real_path(filename)
+
+ b_vaulttext = self.read_data(filename)
+
+ # vault or yaml files are always utf8
+ vaulttext = to_text(b_vaulttext)
+
+ try:
+ # vaulttext gets converted back to bytes, but alas
+ # TODO: return the vault_id that worked?
+ plaintext, vault_id_used, vault_secret_used = self.vault.decrypt_and_get_vault_id(vaulttext)
+ except AnsibleError as e:
+ raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
+
+ # Figure out the vault id from the file, to select the right secret to re-encrypt it
+ # (duplicates parts of decrypt, but alas...)
+ dummy, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext, filename=filename)
+
+ # the vault id in the header may not be the vault id actually used for decrypting,
+ # e.g. when the edited file has no vault-id in its header but was decrypted by a
+ # non-default id in secrets (vault_id=default, while a different vault-id decrypted it)
+
+ # we want to get rid of files encrypted with the AES cipher
+ force_save = (cipher_name not in CIPHER_WRITE_WHITELIST)
+
+ # Keep the same vault-id (and version) as in the header
+ self._edit_file_helper(filename, vault_secret_used, existing_data=plaintext, force_save=force_save, vault_id=vault_id)
+
+ def plaintext(self, filename):
+
+ b_vaulttext = self.read_data(filename)
+ vaulttext = to_text(b_vaulttext)
+
+ try:
+ plaintext = self.vault.decrypt(vaulttext, filename=filename)
+ return plaintext
+ except AnsibleError as e:
+ raise AnsibleVaultError("%s for %s" % (to_native(e), to_native(filename)))
+
+ # FIXME/TODO: make this use VaultSecret
+ def rekey_file(self, filename, new_vault_secret, new_vault_id=None):
+
+ # follow the symlink
+ filename = self._real_path(filename)
+
+ prev = os.stat(filename)
+ b_vaulttext = self.read_data(filename)
+ vaulttext = to_text(b_vaulttext)
+
+ display.vvvvv(u'Rekeying file "%s" with new vault-id "%s" and vault secret %s' %
+ (to_text(filename), to_text(new_vault_id), to_text(new_vault_secret)))
+ try:
+ plaintext, vault_id_used, _dummy = self.vault.decrypt_and_get_vault_id(vaulttext)
+ except AnsibleError as e:
+ raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
+
+ # This is more or less an assert, see #18247
+ if new_vault_secret is None:
+ raise AnsibleError('The value for the new_password to rekey %s with is not valid' % filename)
+
+ # FIXME: VaultContext...? could rekey to a different vault_id in the same VaultSecrets
+
+ # Need a new VaultLib because the new vault data can be a different
+ # vault lib format or cipher (for example, when we migrate 1.0 style vault data to
+ # 1.1 style data we change the version and the cipher). This is where a VaultContext might help
+
+ # the new vault will only be used for encrypting, so it doesn't need the vault secrets
+ # (we will pass one in directly to encrypt)
+ new_vault = VaultLib(secrets={})
+ b_new_vaulttext = new_vault.encrypt(plaintext, new_vault_secret, vault_id=new_vault_id)
+
+ self.write_data(b_new_vaulttext, filename)
+
+ # preserve permissions
+ os.chmod(filename, prev.st_mode)
+ os.chown(filename, prev.st_uid, prev.st_gid)
+
+ display.vvvvv(u'Rekeyed file "%s" (decrypted with vault id "%s") was encrypted with new vault-id "%s" and vault secret %s' %
+ (to_text(filename), to_text(vault_id_used), to_text(new_vault_id), to_text(new_vault_secret)))
+
+ def read_data(self, filename):
+
+ try:
+ if filename == '-':
+ data = sys.stdin.buffer.read()
+ else:
+ with open(filename, "rb") as fh:
+ data = fh.read()
+ except Exception as e:
+ msg = to_native(e)
+ if not msg:
+ msg = repr(e)
+ raise AnsibleError('Unable to read source file (%s): %s' % (to_native(filename), msg))
+
+ return data
+
+ def write_data(self, data, thefile, shred=True, mode=0o600):
+ """Write the data bytes to the given path
+
+ This is used to write a byte string to a file or stdout. It is used both
+ for saving the ciphertext after encryption and for saving the plaintext
+ after decrypting a vault. The 'data' arg should be of type bytes, since
+ in the plaintext case the original contents can be of any text encoding
+ or arbitrary binary data. When writing the result of vault encryption,
+ 'data' should be a utf-8 encoded byte string and not a text type.
+
+ :arg data: the byte string (bytes) data
+ :arg thefile: file descriptor or filename to save 'data' to.
+ :arg shred: if shred==True, make sure that the original data is first shredded so that it cannot be recovered.
+ :arg mode: permissions to set when a new file is created (default 0o600).
+ :returns: None
+ """
+ # FIXME: do we need this now? data_bytes should always be a utf-8 byte string
+ b_file_data = to_bytes(data, errors='strict')
+
+ # check if we have a file descriptor instead of a path
+ is_fd = False
+ try:
+ is_fd = (isinstance(thefile, int) and fcntl.fcntl(thefile, fcntl.F_GETFD) != -1)
+ except Exception:
+ pass
+
+ if is_fd:
+ # if passed descriptor, use that to ensure secure access, otherwise it is a string.
+ # assumes the fd is securely opened by caller (mkstemp)
+ os.ftruncate(thefile, 0)
+ os.write(thefile, b_file_data)
+ elif thefile == '-':
+ # get a ref to either sys.stdout.buffer for py3 or plain old sys.stdout for py2
+ # We need sys.stdout.buffer on py3 so we can write bytes to it since the plaintext
+ # of the vaulted object could be anything/binary/etc
+ output = getattr(sys.stdout, 'buffer', sys.stdout)
+ output.write(b_file_data)
+ else:
+ # file names are insecure and prone to race conditions, so remove and create securely
+ if os.path.isfile(thefile):
+ if shred:
+ self._shred_file(thefile)
+ else:
+ os.remove(thefile)
+
+ # when setting new umask, we get previous as return
+ current_umask = os.umask(0o077)
+ try:
+ try:
+ # create file with secure permissions
+ fd = os.open(thefile, os.O_CREAT | os.O_EXCL | os.O_RDWR | os.O_TRUNC, mode)
+ except OSError as ose:
+ # Want to catch FileExistsError, which doesn't exist in Python 2, so catch OSError
+ # and compare the error number to get equivalent behavior in Python 2/3
+ if ose.errno == errno.EEXIST:
+ raise AnsibleError('Vault file got recreated while we were operating on it: %s' % to_native(ose))
+
+ raise AnsibleError('Problem creating temporary vault file: %s' % to_native(ose))
+
+ try:
+ # now write to the file and ensure ours is only data in it
+ os.ftruncate(fd, 0)
+ os.write(fd, b_file_data)
+ except OSError as e:
+ raise AnsibleError('Unable to write to temporary vault file: %s' % to_native(e))
+ finally:
+ # Make sure the file descriptor is always closed and reset umask
+ os.close(fd)
+ finally:
+ os.umask(current_umask)
+
+ def shuffle_files(self, src, dest):
+ prev = None
+ # overwrite dest with src
+ if os.path.isfile(dest):
+ prev = os.stat(dest)
+ # old file 'dest' was encrypted, no need to _shred_file
+ os.remove(dest)
+ shutil.move(src, dest)
+
+ # reset permissions if needed
+ if prev is not None:
+ # TODO: selinux, ACLs, xattr?
+ os.chmod(dest, prev.st_mode)
+ os.chown(dest, prev.st_uid, prev.st_gid)
+
+ def _editor_shell_command(self, filename):
+ env_editor = os.environ.get('EDITOR', 'vi')
+ editor = shlex.split(env_editor)
+ editor.append(filename)
+
+ return editor
+
+
+########################################
+# CIPHERS #
+########################################
+
+class VaultAES256:
+
+ """
+ Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
+ Keys are derived using PBKDF2
+ """
+
+ # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
+
+ # Note: strings in this class should be byte strings by default.
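+
+ # For reference, the payload produced by encrypt() below is, conceptually:
+ #
+ #   hexlify(hexlify(salt) + b'\n' + hexlify(hmac) + b'\n' + hexlify(ciphertext))
+ #
+ # and parse_vaulttext() (used by decrypt()) reverses this.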
+
+ def __init__(self):
+ if not HAS_CRYPTOGRAPHY:
+ raise AnsibleError(NEED_CRYPTO_LIBRARY)
+
+ @staticmethod
+ def _create_key_cryptography(b_password, b_salt, key_length, iv_length):
+ kdf = PBKDF2HMAC(
+ algorithm=hashes.SHA256(),
+ length=2 * key_length + iv_length,
+ salt=b_salt,
+ iterations=10000,
+ backend=CRYPTOGRAPHY_BACKEND)
+ b_derivedkey = kdf.derive(b_password)
+
+ return b_derivedkey
+
+ @classmethod
+ def _gen_key_initctr(cls, b_password, b_salt):
+ # 16 for AES 128, 32 for AES256
+ key_length = 32
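+
+ # The single PBKDF2 output below is sliced as [ key1 | key2 | iv ]:
+ # key1 becomes the AES-CTR key, key2 the HMAC key, and iv the CTR nonce.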
+
+ if HAS_CRYPTOGRAPHY:
+ # AES is a 128-bit block cipher, so IVs and counter nonces are 16 bytes
+ iv_length = algorithms.AES.block_size // 8
+
+ b_derivedkey = cls._create_key_cryptography(b_password, b_salt, key_length, iv_length)
+ b_iv = b_derivedkey[(key_length * 2):(key_length * 2) + iv_length]
+ else:
+ raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in initctr)')
+
+ b_key1 = b_derivedkey[:key_length]
+ b_key2 = b_derivedkey[key_length:(key_length * 2)]
+
+ return b_key1, b_key2, b_iv
+
+ @staticmethod
+ def _encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv):
+ cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
+ encryptor = cipher.encryptor()
+ padder = padding.PKCS7(algorithms.AES.block_size).padder()
+ b_ciphertext = encryptor.update(padder.update(b_plaintext) + padder.finalize())
+ b_ciphertext += encryptor.finalize()
+
+ # sign the ciphertext with HMAC-SHA256 (salt, digest and data are combined later, in encrypt())
+ hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
+ hmac.update(b_ciphertext)
+ b_hmac = hmac.finalize()
+
+ return to_bytes(hexlify(b_hmac), errors='surrogate_or_strict'), hexlify(b_ciphertext)
+
+ @classmethod
+ def encrypt(cls, b_plaintext, secret, salt=None):
+
+ if secret is None:
+ raise AnsibleVaultError('The secret passed to encrypt() was None')
+
+ if salt is None:
+ b_salt = os.urandom(32)
+ elif not salt:
+ raise AnsibleVaultError('Empty or invalid salt passed to encrypt()')
+ else:
+ b_salt = to_bytes(salt)
+
+ b_password = secret.bytes
+ b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
+
+ if HAS_CRYPTOGRAPHY:
+ b_hmac, b_ciphertext = cls._encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv)
+ else:
+ raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in encrypt)')
+
+ b_vaulttext = b'\n'.join([hexlify(b_salt), b_hmac, b_ciphertext])
+ # Unnecessary but getting rid of it is a backwards incompatible vault
+ # format change
+ b_vaulttext = hexlify(b_vaulttext)
+ return b_vaulttext
+
+ @classmethod
+ def _decrypt_cryptography(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv):
+ # EXIT EARLY IF DIGEST DOESN'T MATCH
+ hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
+ hmac.update(b_ciphertext)
+ try:
+ hmac.verify(_unhexlify(b_crypted_hmac))
+ except InvalidSignature as e:
+ raise AnsibleVaultError('HMAC verification failed: %s' % e)
+
+ cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
+ decryptor = cipher.decryptor()
+ unpadder = padding.PKCS7(128).unpadder()
+ b_plaintext = unpadder.update(
+ decryptor.update(b_ciphertext) + decryptor.finalize()
+ ) + unpadder.finalize()
+
+ return b_plaintext
+
+ @staticmethod
+ def _is_equal(b_a, b_b):
+ """
+ Compare two byte strings in constant time to avoid timing attacks.
+
+ It would be nice if there were a library for this but hey.
+ """
+ if not (isinstance(b_a, binary_type) and isinstance(b_b, binary_type)):
+ raise TypeError('_is_equal can only be used to compare two byte strings')
+
+ # http://codahale.com/a-lesson-in-timing-attacks/
+ if len(b_a) != len(b_b):
+ return False
+
+ result = 0
+ for b_x, b_y in zip(b_a, b_b):
+ result |= b_x ^ b_y
+ return result == 0
+
+ @classmethod
+ def decrypt(cls, b_vaulttext, secret):
+
+ b_ciphertext, b_salt, b_crypted_hmac = parse_vaulttext(b_vaulttext)
+
+ # TODO: would be nice if a VaultSecret could be passed directly to _decrypt_*
+ # (move _gen_key_initctr() to a AES256 VaultSecret or VaultContext impl?)
+ # though, likely needs to be python cryptography specific impl that basically
+ # creates a Cipher() with b_key1, a Mode.CTR() with b_iv, and a HMAC() with sign key b_key2
+ b_password = secret.bytes
+
+ b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
+
+ if HAS_CRYPTOGRAPHY:
+ b_plaintext = cls._decrypt_cryptography(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv)
+ else:
+ raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in decrypt)')
+
+ return b_plaintext
+
+
+# Keys could be made bytes later if the code that gets the data is more
+# naturally byte-oriented
+CIPHER_MAPPING = {
+ u'AES256': VaultAES256,
+}
diff --git a/lib/ansible/parsing/yaml/__init__.py b/lib/ansible/parsing/yaml/__init__.py
new file mode 100644
index 0000000..ae8ccff
--- /dev/null
+++ b/lib/ansible/parsing/yaml/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/parsing/yaml/constructor.py b/lib/ansible/parsing/yaml/constructor.py
new file mode 100644
index 0000000..4b79578
--- /dev/null
+++ b/lib/ansible/parsing/yaml/constructor.py
@@ -0,0 +1,178 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from yaml.constructor import SafeConstructor, ConstructorError
+from yaml.nodes import MappingNode
+
+from ansible import constants as C
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode, AnsibleVaultEncryptedUnicode
+from ansible.parsing.vault import VaultLib
+from ansible.utils.display import Display
+from ansible.utils.unsafe_proxy import wrap_var
+
+display = Display()
+
+
+class AnsibleConstructor(SafeConstructor):
+ def __init__(self, file_name=None, vault_secrets=None):
+ self._ansible_file_name = file_name
+ super(AnsibleConstructor, self).__init__()
+ self._vaults = {}
+ self.vault_secrets = vault_secrets or []
+ self._vaults['default'] = VaultLib(secrets=self.vault_secrets)
+
+ def construct_yaml_map(self, node):
+ data = AnsibleMapping()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+ data.ansible_pos = self._node_position_info(node)
+
+ def construct_mapping(self, node, deep=False):
+ # Most of this is from yaml.constructor.SafeConstructor. We replicate
+ # it here so that we can warn users when they have duplicate dict keys
+ # (pyyaml silently allows overwriting keys)
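+ # e.g. (illustrative) YAML that triggers the duplicate-key handling below:
+ #
+ #   foo: 1
+ #   foo: 2   # duplicate; handled per C.DUPLICATE_YAML_DICT_KEY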
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ self.flatten_mapping(node)
+ mapping = AnsibleMapping()
+
+ # Add our extra information to the returned value
+ mapping.ansible_pos = self._node_position_info(node)
+
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError as exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+
+ if key in mapping:
+ msg = (u'While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}).'
+ u' Using last defined value only.'.format(key, *mapping.ansible_pos))
+ if C.DUPLICATE_YAML_DICT_KEY == 'warn':
+ display.warning(msg)
+ elif C.DUPLICATE_YAML_DICT_KEY == 'error':
+ raise ConstructorError(context=None, context_mark=None,
+ problem=to_native(msg),
+ problem_mark=node.start_mark,
+ note=None)
+ else:
+ # when 'ignore'
+ display.debug(msg)
+
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+
+ return mapping
+
+ def construct_yaml_str(self, node):
+ # Override the default string handling function
+ # to always return unicode objects
+ value = self.construct_scalar(node)
+ ret = AnsibleUnicode(value)
+
+ ret.ansible_pos = self._node_position_info(node)
+
+ return ret
+
+ def construct_vault_encrypted_unicode(self, node):
+ value = self.construct_scalar(node)
+ b_ciphertext_data = to_bytes(value)
+ # could pass in a key id here to choose the vault to associate with
+ # TODO/FIXME: plugin vault selector
+ vault = self._vaults['default']
+ if vault.secrets is None:
+ raise ConstructorError(context=None, context_mark=None,
+ problem="found !vault but no vault password provided",
+ problem_mark=node.start_mark,
+ note=None)
+ ret = AnsibleVaultEncryptedUnicode(b_ciphertext_data)
+ ret.vault = vault
+ ret.ansible_pos = self._node_position_info(node)
+ return ret
+
+ def construct_yaml_seq(self, node):
+ data = AnsibleSequence()
+ yield data
+ data.extend(self.construct_sequence(node))
+ data.ansible_pos = self._node_position_info(node)
+
+ def construct_yaml_unsafe(self, node):
+ try:
+ constructor = getattr(node, 'id', 'object')
+ if constructor is not None:
+ constructor = getattr(self, 'construct_%s' % constructor)
+ except AttributeError:
+ constructor = self.construct_object
+
+ value = constructor(node)
+
+ return wrap_var(value)
+
+ def _node_position_info(self, node):
+ # the line number where the previous token has ended (plus empty lines)
+ # Add one so that the first line is line 1 rather than line 0
+ column = node.start_mark.column + 1
+ line = node.start_mark.line + 1
+
+ # in some cases, we may have pre-read the data and then
+ # passed it to the load() call for YAML, in which case we
+ # want to override the default datasource (which would be
+ # '<string>') to the actual filename we read in
+ datasource = self._ansible_file_name or node.start_mark.name
+
+ return (datasource, line, column)
+
+
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ AnsibleConstructor.construct_yaml_map)
+
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ AnsibleConstructor.construct_yaml_map)
+
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ AnsibleConstructor.construct_yaml_str)
+
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ AnsibleConstructor.construct_yaml_str)
+
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ AnsibleConstructor.construct_yaml_seq)
+
+AnsibleConstructor.add_constructor(
+ u'!unsafe',
+ AnsibleConstructor.construct_yaml_unsafe)
+
+AnsibleConstructor.add_constructor(
+ u'!vault',
+ AnsibleConstructor.construct_vault_encrypted_unicode)
+
+AnsibleConstructor.add_constructor(u'!vault-encrypted', AnsibleConstructor.construct_vault_encrypted_unicode)
diff --git a/lib/ansible/parsing/yaml/dumper.py b/lib/ansible/parsing/yaml/dumper.py
new file mode 100644
index 0000000..8701bb8
--- /dev/null
+++ b/lib/ansible/parsing/yaml/dumper.py
@@ -0,0 +1,122 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import yaml
+
+from ansible.module_utils.six import text_type, binary_type
+from ansible.module_utils.common.yaml import SafeDumper
+from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping, AnsibleVaultEncryptedUnicode
+from ansible.utils.unsafe_proxy import AnsibleUnsafeText, AnsibleUnsafeBytes, NativeJinjaUnsafeText, NativeJinjaText
+from ansible.template import AnsibleUndefined
+from ansible.vars.hostvars import HostVars, HostVarsVars
+from ansible.vars.manager import VarsWithSources
+
+
+class AnsibleDumper(SafeDumper):
+ '''
+ A simple stub class that allows us to add representers
+ for our overridden object types.
+ '''
+
+
+def represent_hostvars(self, data):
+ return self.represent_dict(dict(data))
+
+
+# Note: we only want to represent the encrypted data
+def represent_vault_encrypted_unicode(self, data):
+ return self.represent_scalar(u'!vault', data._ciphertext.decode(), style='|')
+
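+# For example (illustrative values), yaml.dump(data, Dumper=AnsibleDumper) renders
+# such a value as a '!vault' literal block scalar:
+#
+#   key: !vault |
+#       $ANSIBLE_VAULT;1.2;AES256;dev
+#       6162636465666768...
+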
+
+def represent_unicode(self, data):
+ return yaml.representer.SafeRepresenter.represent_str(self, text_type(data))
+
+
+def represent_binary(self, data):
+ return yaml.representer.SafeRepresenter.represent_binary(self, binary_type(data))
+
+
+def represent_undefined(self, data):
+ # Here bool will ensure _fail_with_undefined_error happens
+ # if the value is Undefined.
+ # This happens because Jinja sets __bool__ on StrictUndefined
+ return bool(data)
+
+
+AnsibleDumper.add_representer(
+ AnsibleUnicode,
+ represent_unicode,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleUnsafeText,
+ represent_unicode,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleUnsafeBytes,
+ represent_binary,
+)
+
+AnsibleDumper.add_representer(
+ HostVars,
+ represent_hostvars,
+)
+
+AnsibleDumper.add_representer(
+ HostVarsVars,
+ represent_hostvars,
+)
+
+AnsibleDumper.add_representer(
+ VarsWithSources,
+ represent_hostvars,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleSequence,
+ yaml.representer.SafeRepresenter.represent_list,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleMapping,
+ yaml.representer.SafeRepresenter.represent_dict,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleVaultEncryptedUnicode,
+ represent_vault_encrypted_unicode,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleUndefined,
+ represent_undefined,
+)
+
+AnsibleDumper.add_representer(
+ NativeJinjaUnsafeText,
+ represent_unicode,
+)
+
+AnsibleDumper.add_representer(
+ NativeJinjaText,
+ represent_unicode,
+)
diff --git a/lib/ansible/parsing/yaml/loader.py b/lib/ansible/parsing/yaml/loader.py
new file mode 100644
index 0000000..15bde79
--- /dev/null
+++ b/lib/ansible/parsing/yaml/loader.py
@@ -0,0 +1,45 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from yaml.resolver import Resolver
+
+from ansible.parsing.yaml.constructor import AnsibleConstructor
+from ansible.module_utils.common.yaml import HAS_LIBYAML, Parser
+
+if HAS_LIBYAML:
+ class AnsibleLoader(Parser, AnsibleConstructor, Resolver): # type: ignore[misc] # pylint: disable=inconsistent-mro
+ def __init__(self, stream, file_name=None, vault_secrets=None):
+ Parser.__init__(self, stream)
+ AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
+ Resolver.__init__(self)
+else:
+ from yaml.composer import Composer
+ from yaml.reader import Reader
+ from yaml.scanner import Scanner
+
+ class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver): # type: ignore[misc,no-redef] # pylint: disable=inconsistent-mro
+ def __init__(self, stream, file_name=None, vault_secrets=None):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
+ Resolver.__init__(self)
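+
+# Usage sketch (illustrative; 'secrets' is a list of (vault_id, VaultSecret)
+# tuples assembled elsewhere):
+#
+#   with open('playbook.yml') as stream:
+#       loader = AnsibleLoader(stream, file_name='playbook.yml', vault_secrets=secrets)
+#       data = loader.get_single_data()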
diff --git a/lib/ansible/parsing/yaml/objects.py b/lib/ansible/parsing/yaml/objects.py
new file mode 100644
index 0000000..a2e2a66
--- /dev/null
+++ b/lib/ansible/parsing/yaml/objects.py
@@ -0,0 +1,365 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys as _sys
+
+from collections.abc import Sequence
+
+from ansible.errors import AnsibleVaultError
+from ansible.module_utils.six import text_type
+from ansible.module_utils._text import to_bytes, to_text, to_native
+
+
+class AnsibleBaseYAMLObject(object):
+ '''
+ the base class used to sub-class python built-in objects
+ so that we can add attributes to them during yaml parsing
+
+ '''
+ _data_source = None
+ _line_number = 0
+ _column_number = 0
+
+ def _get_ansible_position(self):
+ return (self._data_source, self._line_number, self._column_number)
+
+ def _set_ansible_position(self, obj):
+ try:
+ (src, line, col) = obj
+ except (TypeError, ValueError):
+ raise AssertionError(
+ 'ansible_pos can only be set with a tuple/list '
+ 'of three values: source, line number, column number'
+ )
+ self._data_source = src
+ self._line_number = line
+ self._column_number = col
+
+ ansible_pos = property(_get_ansible_position, _set_ansible_position)
+
+
+class AnsibleMapping(AnsibleBaseYAMLObject, dict):
+ ''' sub class for dictionaries '''
+ pass
+
+
+class AnsibleUnicode(AnsibleBaseYAMLObject, text_type):
+ ''' sub class for unicode objects '''
+ pass
+
+
+class AnsibleSequence(AnsibleBaseYAMLObject, list):
+ ''' sub class for lists '''
+ pass
+
+
+class AnsibleVaultEncryptedUnicode(Sequence, AnsibleBaseYAMLObject):
+ '''Unicode-like object that is not evaluated (decrypted) until it needs to be'''
+ __UNSAFE__ = True
+ __ENCRYPTED__ = True
+ yaml_tag = u'!vault'
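+
+ # Construction sketch (illustrative; 'vault' is a VaultLib and 'secret' a
+ # VaultSecret set up elsewhere):
+ #
+ #   avu = AnsibleVaultEncryptedUnicode.from_plaintext(u'hunter2', vault, secret)
+ #   str(avu)  # decrypts on demand via avu.vault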
+
+ @classmethod
+ def from_plaintext(cls, seq, vault, secret):
+ if not vault:
+ raise AnsibleVaultError('Error creating AnsibleVaultEncryptedUnicode, invalid vault (%s) provided' % vault)
+
+ ciphertext = vault.encrypt(seq, secret)
+ avu = cls(ciphertext)
+ avu.vault = vault
+ return avu
+
+ def __init__(self, ciphertext):
+ '''An AnsibleUnicode-like object with a Vault attribute that can decrypt it.
+
+ ciphertext is a byte string (str on PY2, bytes on PY3).
+
+ The .data attribute is a property that returns the decrypted plaintext
+ of the ciphertext as a PY2 unicode or PY3 string object.
+ '''
+ super(AnsibleVaultEncryptedUnicode, self).__init__()
+
+ # after construction, calling code has to set the .vault attribute to a vaultlib object
+ self.vault = None
+ self._ciphertext = to_bytes(ciphertext)
+
+ @property
+ def data(self):
+ if not self.vault:
+ return to_text(self._ciphertext)
+ return to_text(self.vault.decrypt(self._ciphertext, obj=self))
+
+ @data.setter
+ def data(self, value):
+ self._ciphertext = to_bytes(value)
+
+ def is_encrypted(self):
+ return self.vault and self.vault.is_encrypted(self._ciphertext)
+
+ def __eq__(self, other):
+ if self.vault:
+ return other == self.data
+ return False
+
+ def __ne__(self, other):
+ if self.vault:
+ return other != self.data
+ return True
+
+ def __reversed__(self):
+ # This gets inherited from ``collections.abc.Sequence``, which returns a generator;
+ # make this act more like the string implementation
+ return to_text(self[::-1], errors='surrogate_or_strict')
+
+ def __str__(self):
+ return to_native(self.data, errors='surrogate_or_strict')
+
+ def __unicode__(self):
+ return to_text(self.data, errors='surrogate_or_strict')
+
+ def encode(self, encoding=None, errors=None):
+ return to_bytes(self.data, encoding=encoding, errors=errors)
+
+ # Methods below are a copy from ``collections.UserString``
+ # Some are copied as is, where others are modified to not
+ # auto wrap with ``self.__class__``
+ def __repr__(self):
+ return repr(self.data)
+
+ def __int__(self, base=10):
+ return int(self.data, base=base)
+
+ def __float__(self):
+ return float(self.data)
+
+ def __complex__(self):
+ return complex(self.data)
+
+ def __hash__(self):
+ return hash(self.data)
+
+ # This breaks vault, do not define it, we cannot satisfy this
+ # def __getnewargs__(self):
+ # return (self.data[:],)
+
+ def __lt__(self, string):
+ if isinstance(string, AnsibleVaultEncryptedUnicode):
+ return self.data < string.data
+ return self.data < string
+
+ def __le__(self, string):
+ if isinstance(string, AnsibleVaultEncryptedUnicode):
+ return self.data <= string.data
+ return self.data <= string
+
+ def __gt__(self, string):
+ if isinstance(string, AnsibleVaultEncryptedUnicode):
+ return self.data > string.data
+ return self.data > string
+
+ def __ge__(self, string):
+ if isinstance(string, AnsibleVaultEncryptedUnicode):
+ return self.data >= string.data
+ return self.data >= string
+
+ def __contains__(self, char):
+ if isinstance(char, AnsibleVaultEncryptedUnicode):
+ char = char.data
+ return char in self.data
+
+ def __len__(self):
+ return len(self.data)
+
+ def __getitem__(self, index):
+ return self.data[index]
+
+ def __getslice__(self, start, end):
+ start = max(start, 0)
+ end = max(end, 0)
+ return self.data[start:end]
+
+ def __add__(self, other):
+ if isinstance(other, AnsibleVaultEncryptedUnicode):
+ return self.data + other.data
+ elif isinstance(other, text_type):
+ return self.data + other
+ return self.data + to_text(other)
+
+ def __radd__(self, other):
+ if isinstance(other, text_type):
+ return other + self.data
+ return to_text(other) + self.data
+
+ def __mul__(self, n):
+ return self.data * n
+
+ __rmul__ = __mul__
+
+ def __mod__(self, args):
+ return self.data % args
+
+ def __rmod__(self, template):
+ return to_text(template) % self
+
+ # the following methods are defined in alphabetical order:
+ def capitalize(self):
+ return self.data.capitalize()
+
+ def casefold(self):
+ return self.data.casefold()
+
+ def center(self, width, *args):
+ return self.data.center(width, *args)
+
+ def count(self, sub, start=0, end=_sys.maxsize):
+ if isinstance(sub, AnsibleVaultEncryptedUnicode):
+ sub = sub.data
+ return self.data.count(sub, start, end)
+
+ def endswith(self, suffix, start=0, end=_sys.maxsize):
+ return self.data.endswith(suffix, start, end)
+
+ def expandtabs(self, tabsize=8):
+ return self.data.expandtabs(tabsize)
+
+ def find(self, sub, start=0, end=_sys.maxsize):
+ if isinstance(sub, AnsibleVaultEncryptedUnicode):
+ sub = sub.data
+ return self.data.find(sub, start, end)
+
+ def format(self, *args, **kwds):
+ return self.data.format(*args, **kwds)
+
+ def format_map(self, mapping):
+ return self.data.format_map(mapping)
+
+ def index(self, sub, start=0, end=_sys.maxsize):
+ return self.data.index(sub, start, end)
+
+ def isalpha(self):
+ return self.data.isalpha()
+
+ def isalnum(self):
+ return self.data.isalnum()
+
+ def isascii(self):
+ return self.data.isascii()
+
+ def isdecimal(self):
+ return self.data.isdecimal()
+
+ def isdigit(self):
+ return self.data.isdigit()
+
+ def isidentifier(self):
+ return self.data.isidentifier()
+
+ def islower(self):
+ return self.data.islower()
+
+ def isnumeric(self):
+ return self.data.isnumeric()
+
+ def isprintable(self):
+ return self.data.isprintable()
+
+ def isspace(self):
+ return self.data.isspace()
+
+ def istitle(self):
+ return self.data.istitle()
+
+ def isupper(self):
+ return self.data.isupper()
+
+ def join(self, seq):
+ return self.data.join(seq)
+
+ def ljust(self, width, *args):
+ return self.data.ljust(width, *args)
+
+ def lower(self):
+ return self.data.lower()
+
+ def lstrip(self, chars=None):
+ return self.data.lstrip(chars)
+
+ maketrans = str.maketrans
+
+ def partition(self, sep):
+ return self.data.partition(sep)
+
+ def replace(self, old, new, maxsplit=-1):
+ if isinstance(old, AnsibleVaultEncryptedUnicode):
+ old = old.data
+ if isinstance(new, AnsibleVaultEncryptedUnicode):
+ new = new.data
+ return self.data.replace(old, new, maxsplit)
+
+ def rfind(self, sub, start=0, end=_sys.maxsize):
+ if isinstance(sub, AnsibleVaultEncryptedUnicode):
+ sub = sub.data
+ return self.data.rfind(sub, start, end)
+
+ def rindex(self, sub, start=0, end=_sys.maxsize):
+ return self.data.rindex(sub, start, end)
+
+ def rjust(self, width, *args):
+ return self.data.rjust(width, *args)
+
+ def rpartition(self, sep):
+ return self.data.rpartition(sep)
+
+ def rstrip(self, chars=None):
+ return self.data.rstrip(chars)
+
+ def split(self, sep=None, maxsplit=-1):
+ return self.data.split(sep, maxsplit)
+
+ def rsplit(self, sep=None, maxsplit=-1):
+ return self.data.rsplit(sep, maxsplit)
+
+ def splitlines(self, keepends=False):
+ return self.data.splitlines(keepends)
+
+ def startswith(self, prefix, start=0, end=_sys.maxsize):
+ return self.data.startswith(prefix, start, end)
+
+ def strip(self, chars=None):
+ return self.data.strip(chars)
+
+ def swapcase(self):
+ return self.data.swapcase()
+
+ def title(self):
+ return self.data.title()
+
+ def translate(self, *args):
+ return self.data.translate(*args)
+
+ def upper(self):
+ return self.data.upper()
+
+ def zfill(self, width):
+ return self.data.zfill(width)
diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
new file mode 100644
index 0000000..0ab2271
--- /dev/null
+++ b/lib/ansible/playbook/__init__.py
@@ -0,0 +1,117 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleParserError
+from ansible.module_utils._text import to_text, to_native
+from ansible.playbook.play import Play
+from ansible.playbook.playbook_include import PlaybookInclude
+from ansible.plugins.loader import add_all_plugin_dirs
+from ansible.utils.display import Display
+from ansible.utils.path import unfrackpath
+
+display = Display()
+
+
+__all__ = ['Playbook']
+
+
+class Playbook:
+
+ def __init__(self, loader):
+ # Entries in the data structure of a playbook may
+ # be either a play or an include statement
+ self._entries = []
+ self._basedir = to_text(os.getcwd(), errors='surrogate_or_strict')
+ self._loader = loader
+ self._file_name = None
+
+ @staticmethod
+ def load(file_name, variable_manager=None, loader=None):
+ pb = Playbook(loader=loader)
+ pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
+ return pb
+
+ def _load_playbook_data(self, file_name, variable_manager, vars=None):
+
+ if os.path.isabs(file_name):
+ self._basedir = os.path.dirname(file_name)
+ else:
+ self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
+
+ # set the loaders basedir
+ cur_basedir = self._loader.get_basedir()
+ self._loader.set_basedir(self._basedir)
+
+ add_all_plugin_dirs(self._basedir)
+
+ self._file_name = file_name
+
+ try:
+ ds = self._loader.load_from_file(os.path.basename(file_name))
+ except UnicodeDecodeError as e:
+ raise AnsibleParserError("Could not read playbook (%s) due to encoding issues: %s" % (file_name, to_native(e)))
+
+ # check for errors and restore the basedir in case this error is caught and handled
+ if ds is None:
+ self._loader.set_basedir(cur_basedir)
+ raise AnsibleParserError("Empty playbook, nothing to do: %s" % unfrackpath(file_name), obj=ds)
+ elif not isinstance(ds, list):
+ self._loader.set_basedir(cur_basedir)
+ raise AnsibleParserError("A playbook must be a list of plays, got a %s instead: %s" % (type(ds), unfrackpath(file_name)), obj=ds)
+ elif not ds:
+ self._loader.set_basedir(cur_basedir)
+ raise AnsibleParserError("A playbook must contain at least one play: %s" % unfrackpath(file_name))
+
+ # Parse the playbook entries. For plays, we simply parse them
+ # using the Play() object, and includes are parsed using the
+ # PlaybookInclude() object
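+ # e.g. (illustrative) ds is a list like:
+ #   - hosts: all                    # a play -> Play.load()
+ #     tasks: [...]
+ #   - import_playbook: other.yml    # an include -> PlaybookInclude.load()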
+ for entry in ds:
+ if not isinstance(entry, dict):
+ # restore the basedir in case this error is caught and handled
+ self._loader.set_basedir(cur_basedir)
+ raise AnsibleParserError("playbook entries must be either valid plays or 'import_playbook' statements", obj=entry)
+
+ if any(action in entry for action in C._ACTION_IMPORT_PLAYBOOK):
+ pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
+ if pb is not None:
+ self._entries.extend(pb._entries)
+ else:
+ which = entry
+ for k in C._ACTION_IMPORT_PLAYBOOK:
+ if k in entry:
+ which = entry[k]
+ break
+ display.display("skipping playbook '%s' due to conditional test failure" % which, color=C.COLOR_SKIP)
+ else:
+ entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader, vars=vars)
+ self._entries.append(entry_obj)
+
+ # we're done, so restore the old basedir in the loader
+ self._loader.set_basedir(cur_basedir)
+
+ def get_loader(self):
+ return self._loader
+
+ def get_plays(self):
+ return self._entries[:]
diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py
new file mode 100644
index 0000000..b28405d
--- /dev/null
+++ b/lib/ansible/playbook/attribute.py
@@ -0,0 +1,202 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from copy import copy, deepcopy
+
+from ansible.utils.sentinel import Sentinel
+
+_CONTAINERS = frozenset(('list', 'dict', 'set'))
+
+
+class Attribute:
+
+ def __init__(
+ self,
+ isa=None,
+ private=False,
+ default=None,
+ required=False,
+ listof=None,
+ priority=0,
+ class_type=None,
+ always_post_validate=False,
+ alias=None,
+ static=False,
+ ):
+
+ """
+ :class:`Attribute` specifies constraints for attributes of objects which
+ derive from playbook data. The attributes of the object are basically
+ a schema for the yaml playbook.
+
+ :kwarg isa: The type of the attribute. Allowable values are a string
+ representation of any yaml basic datatype, python class, or percent.
+ (Enforced at post-validation time).
+ :kwarg private: Not used at runtime. The docs playbook keyword dumper uses it to determine
+ that a keyword should not be documented. mpdehaan had plans to remove attributes marked
+ private from the ds so they would not have been available at all.
+ :kwarg default: Default value if unspecified in the YAML document.
+ :kwarg required: Whether or not the YAML document must contain this field.
+ If the attribute is None when post-validated, an error will be raised.
+ :kwarg listof: If isa is set to "list", this can optionally be set to
+ ensure that all elements in the list are of the given type. Valid
+ values here are the same as those for isa.
+ :kwarg priority: The order in which the fields should be parsed. Generally
+ this does not need to be set; it is for rare situations where one
+ field depends on another field having been parsed first.
+ :kwarg class_type: If isa is set to "class", this can be optionally set to
+ a class (not a string name). The YAML data for this field will be
+ passed to the __init__ method of that class during post validation and
+ the field will be an instance of that class.
+ :kwarg always_post_validate: Controls whether a field should be post
+ validated or not (default: False).
+ :kwarg alias: An alias to use for the attribute name, for situations where
+ the attribute name may conflict with a Python reserved word.
+ """
+
+ self.isa = isa
+ self.private = private
+ self.default = default
+ self.required = required
+ self.listof = listof
+ self.priority = priority
+ self.class_type = class_type
+ self.always_post_validate = always_post_validate
+ self.alias = alias
+ self.static = static
+
+ if default is not None and self.isa in _CONTAINERS and not callable(default):
+ raise TypeError('defaults for FieldAttribute may not be mutable, please provide a callable instead')
+
+ def __set_name__(self, owner, name):
+ self.name = name
+
+ def __eq__(self, other):
+ return other.priority == self.priority
+
+ def __ne__(self, other):
+ return other.priority != self.priority
+
+ # NB: higher priority numbers sort first
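+ # e.g. sorted() on attributes places priority=10 before priority=0, because
+ # the comparisons below are intentionally inverted.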
+
+ def __lt__(self, other):
+ return other.priority < self.priority
+
+ def __gt__(self, other):
+ return other.priority > self.priority
+
+ def __le__(self, other):
+ return other.priority <= self.priority
+
+ def __ge__(self, other):
+ return other.priority >= self.priority
+
+ def __get__(self, obj, obj_type=None):
+ method = f'_get_attr_{self.name}'
+ if hasattr(obj, method):
+ # NOTE this appears to be not used in the codebase,
+ # _get_attr_connection has been replaced by ConnectionFieldAttribute.
+ # Leaving it here for test_attr_method from
+ # test/units/playbook/test_base.py to pass and for backwards compat.
+ if getattr(obj, '_squashed', False):
+ value = getattr(obj, f'_{self.name}', Sentinel)
+ else:
+ value = getattr(obj, method)()
+ else:
+ value = getattr(obj, f'_{self.name}', Sentinel)
+
+ if value is Sentinel:
+ value = self.default
+ if callable(value):
+ value = value()
+ setattr(obj, f'_{self.name}', value)
+
+ return value
+
+ def __set__(self, obj, value):
+ setattr(obj, f'_{self.name}', value)
+ if self.alias is not None:
+ setattr(obj, f'_{self.alias}', value)
+
+ # NOTE this appears to be not needed in the codebase,
+ # leaving it here for test_attr_int_del from
+ # test/units/playbook/test_base.py to pass.
+ def __delete__(self, obj):
+ delattr(obj, f'_{self.name}')
+
+
+class NonInheritableFieldAttribute(Attribute):
+ ...
+
+
+class FieldAttribute(Attribute):
+ def __init__(self, extend=False, prepend=False, **kwargs):
+ super().__init__(**kwargs)
+
+ self.extend = extend
+ self.prepend = prepend
+
+ def __get__(self, obj, obj_type=None):
+ if getattr(obj, '_squashed', False) or getattr(obj, '_finalized', False):
+ value = getattr(obj, f'_{self.name}', Sentinel)
+ else:
+ try:
+ value = obj._get_parent_attribute(self.name)
+ except AttributeError:
+ method = f'_get_attr_{self.name}'
+ if hasattr(obj, method):
+ # NOTE this appears to be not needed in the codebase,
+ # _get_attr_connection has been replaced by ConnectionFieldAttribute.
+ # Leaving it here for test_attr_method from
+ # test/units/playbook/test_base.py to pass and for backwards compat.
+ if getattr(obj, '_squashed', False):
+ value = getattr(obj, f'_{self.name}', Sentinel)
+ else:
+ value = getattr(obj, method)()
+ else:
+ value = getattr(obj, f'_{self.name}', Sentinel)
+
+ if value is Sentinel:
+ value = self.default
+ if callable(value):
+ value = value()
+ setattr(obj, f'_{self.name}', value)
+
+ return value
+
+
+class ConnectionFieldAttribute(FieldAttribute):
+ def __get__(self, obj, obj_type=None):
+ from ansible.module_utils.compat.paramiko import paramiko
+ from ansible.utils.ssh_functions import check_for_controlpersist
+ value = super().__get__(obj, obj_type)
+
+ if value == 'smart':
+ value = 'ssh'
+ # see if SSH can support ControlPersist if not use paramiko
+ if not check_for_controlpersist('ssh') and paramiko is not None:
+ value = "paramiko"
+
+ # if someone did `connection: persistent`, default it to using a persistent paramiko connection to avoid problems
+ elif value == 'persistent' and paramiko is not None:
+ value = 'paramiko'
+
+ return value
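+# e.g. (editor's illustration): with `connection: smart`, the getter above
+# resolves to 'ssh' when the local ssh binary supports ControlPersist, and
+# falls back to 'paramiko' (if importable) when it does not.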
diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py
new file mode 100644
index 0000000..669aa0a
--- /dev/null
+++ b/lib/ansible/playbook/base.py
@@ -0,0 +1,775 @@
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import itertools
+import operator
+import os
+
+from copy import copy as shallowcopy
+
+from jinja2.exceptions import UndefinedError
+
+from ansible import constants as C
+from ansible import context
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils._text import to_text, to_native
+from ansible.parsing.dataloader import DataLoader
+from ansible.playbook.attribute import Attribute, FieldAttribute, ConnectionFieldAttribute, NonInheritableFieldAttribute
+from ansible.plugins.loader import module_loader, action_loader
+from ansible.utils.collection_loader._collection_finder import _get_collection_metadata, AnsibleCollectionRef
+from ansible.utils.display import Display
+from ansible.utils.sentinel import Sentinel
+from ansible.utils.vars import combine_vars, isidentifier, get_unique_id
+
+display = Display()
+
+
+def _validate_action_group_metadata(action, found_group_metadata, fq_group_name):
+ valid_metadata = {
+ 'extend_group': {
+ 'types': (list, string_types,),
+ 'errortype': 'list',
+ },
+ }
+
+ metadata_warnings = []
+
+ validate = C.VALIDATE_ACTION_GROUP_METADATA
+ metadata_only = isinstance(action, dict) and 'metadata' in action and len(action) == 1
+
+ if validate and not metadata_only:
+ found_keys = ', '.join(sorted(list(action)))
+ metadata_warnings.append("The only expected key is metadata, but got keys: {keys}".format(keys=found_keys))
+ elif validate:
+ if found_group_metadata:
+ metadata_warnings.append("The group contains multiple metadata entries.")
+ if not isinstance(action['metadata'], dict):
+ metadata_warnings.append("The metadata is not a dictionary. Got {metadata}".format(metadata=action['metadata']))
+ else:
+ unexpected_keys = set(action['metadata'].keys()) - set(valid_metadata.keys())
+ if unexpected_keys:
+ metadata_warnings.append("The metadata contains unexpected keys: {0}".format(', '.join(unexpected_keys)))
+ unexpected_types = []
+ for field, requirement in valid_metadata.items():
+ if field not in action['metadata']:
+ continue
+ value = action['metadata'][field]
+ if not isinstance(value, requirement['types']):
+ unexpected_types.append("%s is %s (expected type %s)" % (field, value, requirement['errortype']))
+ if unexpected_types:
+ metadata_warnings.append("The metadata contains unexpected key types: {0}".format(', '.join(unexpected_types)))
+ if metadata_warnings:
+ metadata_warnings.insert(0, "Invalid metadata was found for action_group {0} while loading module_defaults.".format(fq_group_name))
+ display.warning(" ".join(metadata_warnings))
+
+
+class FieldAttributeBase:
+
+ @classmethod
+ @property
+ def fattributes(cls):
+ # FIXME is this worth caching?
+ fattributes = {}
+ for class_obj in reversed(cls.__mro__):
+ for name, attr in list(class_obj.__dict__.items()):
+ if not isinstance(attr, Attribute):
+ continue
+ fattributes[name] = attr
+ if attr.alias:
+ setattr(class_obj, attr.alias, attr)
+ fattributes[attr.alias] = attr
+ return fattributes
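+    # Sketch of the MRO walk above (editor's note; class names are hypothetical):
+    #     class A(FieldAttributeBase): x = FieldAttribute(isa='int')
+    #     class B(A): y = FieldAttribute(isa='string')
+    # B.fattributes maps both 'x' and 'y' to their Attribute descriptors,
+    # because reversed(cls.__mro__) visits base classes before subclasses.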
+
+ def __init__(self):
+
+ # initialize the data loader and variable manager, which will be provided
+ # later when the object is actually loaded
+ self._loader = None
+ self._variable_manager = None
+
+ # other internal params
+ self._validated = False
+ self._squashed = False
+ self._finalized = False
+
+ # every object gets a random uuid:
+ self._uuid = get_unique_id()
+
+        # init vars; avoid using a default in the field declaration, as it would live across plays
+ self.vars = dict()
+
+ @property
+ def finalized(self):
+ return self._finalized
+
+ def dump_me(self, depth=0):
+        ''' never called from production code; a 'complex print' helper to use while debugging '''
+ if depth == 0:
+ display.debug("DUMPING OBJECT ------------------------------------------------------")
+ display.debug("%s- %s (%s, id=%s)" % (" " * depth, self.__class__.__name__, self, id(self)))
+ if hasattr(self, '_parent') and self._parent:
+ self._parent.dump_me(depth + 2)
+ dep_chain = self._parent.get_dep_chain()
+ if dep_chain:
+ for dep in dep_chain:
+ dep.dump_me(depth + 2)
+ if hasattr(self, '_play') and self._play:
+ self._play.dump_me(depth + 2)
+
+ def preprocess_data(self, ds):
+ ''' infrequently used method to do some pre-processing of legacy terms '''
+ return ds
+
+ def load_data(self, ds, variable_manager=None, loader=None):
+ ''' walk the input datastructure and assign any values '''
+
+ if ds is None:
+ raise AnsibleAssertionError('ds (%s) should not be None but it is.' % ds)
+
+ # cache the datastructure internally
+ setattr(self, '_ds', ds)
+
+ # the variable manager class is used to manage and merge variables
+ # down to a single dictionary for reference in templating, etc.
+ self._variable_manager = variable_manager
+
+ # the data loader class is used to parse data from strings and files
+ if loader is not None:
+ self._loader = loader
+ else:
+ self._loader = DataLoader()
+
+ # call the preprocess_data() function to massage the data into
+ # something we can more easily parse, and then call the validation
+ # function on it to ensure there are no incorrect key values
+ ds = self.preprocess_data(ds)
+ self._validate_attributes(ds)
+
+ # Walk all attributes in the class. We sort them based on their priority
+ # so that certain fields can be loaded before others, if they are dependent.
+ for name, attr in sorted(self.fattributes.items(), key=operator.itemgetter(1)):
+ # copy the value over unless a _load_field method is defined
+ if name in ds:
+ method = getattr(self, '_load_%s' % name, None)
+ if method:
+ setattr(self, name, method(name, ds[name]))
+ else:
+ setattr(self, name, ds[name])
+
+ # run early, non-critical validation
+ self.validate()
+
+ # return the constructed object
+ return self
+
+ def get_ds(self):
+ try:
+ return getattr(self, '_ds')
+ except AttributeError:
+ return None
+
+ def get_loader(self):
+ return self._loader
+
+ def get_variable_manager(self):
+ return self._variable_manager
+
+ def _post_validate_debugger(self, attr, value, templar):
+ value = templar.template(value)
+ valid_values = frozenset(('always', 'on_failed', 'on_unreachable', 'on_skipped', 'never'))
+ if value and isinstance(value, string_types) and value not in valid_values:
+ raise AnsibleParserError("'%s' is not a valid value for debugger. Must be one of %s" % (value, ', '.join(valid_values)), obj=self.get_ds())
+ return value
+
+ def _validate_attributes(self, ds):
+ '''
+ Ensures that there are no keys in the datastructure which do
+ not map to attributes for this object.
+ '''
+
+ valid_attrs = frozenset(self.fattributes)
+ for key in ds:
+ if key not in valid_attrs:
+ raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)
+
+ def validate(self, all_vars=None):
+ ''' validation that is done at parse time, not load time '''
+ all_vars = {} if all_vars is None else all_vars
+
+ if not self._validated:
+ # walk all fields in the object
+ for (name, attribute) in self.fattributes.items():
+ # run validator only if present
+ method = getattr(self, '_validate_%s' % name, None)
+ if method:
+ method(attribute, name, getattr(self, name))
+ else:
+ # and make sure the attribute is of the type it should be
+ value = getattr(self, f'_{name}', Sentinel)
+ if value is not None:
+ if attribute.isa == 'string' and isinstance(value, (list, dict)):
+ raise AnsibleParserError(
+ "The field '%s' is supposed to be a string type,"
+ " however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds()
+ )
+
+ self._validated = True
+
+ def _load_module_defaults(self, name, value):
+ if value is None:
+ return
+
+ if not isinstance(value, list):
+ value = [value]
+
+ validated_module_defaults = []
+ for defaults_dict in value:
+ if not isinstance(defaults_dict, dict):
+ raise AnsibleParserError(
+ "The field 'module_defaults' is supposed to be a dictionary or list of dictionaries, "
+ "the keys of which must be static action, module, or group names. Only the values may contain "
+ "templates. For example: {'ping': \"{{ ping_defaults }}\"}"
+ )
+
+ validated_defaults_dict = {}
+ for defaults_entry, defaults in defaults_dict.items():
+ # module_defaults do not use the 'collections' keyword, so actions and
+ # action_groups that are not fully qualified are part of the 'ansible.legacy'
+ # collection. Update those entries here, so module_defaults contains
+ # fully qualified entries.
+ if defaults_entry.startswith('group/'):
+ group_name = defaults_entry.split('group/')[-1]
+
+                # The resolved action_groups cache is saved on the current Play
+ if self.play is not None:
+ group_name, dummy = self._resolve_group(group_name)
+
+ defaults_entry = 'group/' + group_name
+ validated_defaults_dict[defaults_entry] = defaults
+
+ else:
+ if len(defaults_entry.split('.')) < 3:
+ defaults_entry = 'ansible.legacy.' + defaults_entry
+
+ resolved_action = self._resolve_action(defaults_entry)
+ if resolved_action:
+ validated_defaults_dict[resolved_action] = defaults
+
+ # If the defaults_entry is an ansible.legacy plugin, these defaults
+ # are inheritable by the 'ansible.builtin' subset, but are not
+ # required to exist.
+ if defaults_entry.startswith('ansible.legacy.'):
+ resolved_action = self._resolve_action(
+ defaults_entry.replace('ansible.legacy.', 'ansible.builtin.'),
+ mandatory=False
+ )
+ if resolved_action:
+ validated_defaults_dict[resolved_action] = defaults
+
+ validated_module_defaults.append(validated_defaults_dict)
+
+ return validated_module_defaults
+
+ @property
+ def play(self):
+ if hasattr(self, '_play'):
+ play = self._play
+ elif hasattr(self, '_parent') and hasattr(self._parent, '_play'):
+ play = self._parent._play
+ else:
+ play = self
+
+ if play.__class__.__name__ != 'Play':
+ # Should never happen, but handle gracefully by returning None, just in case
+ return None
+
+ return play
+
+ def _resolve_group(self, fq_group_name, mandatory=True):
+ if not AnsibleCollectionRef.is_valid_fqcr(fq_group_name):
+ collection_name = 'ansible.builtin'
+ fq_group_name = collection_name + '.' + fq_group_name
+ else:
+ collection_name = '.'.join(fq_group_name.split('.')[0:2])
+
+ # Check if the group has already been resolved and cached
+ if fq_group_name in self.play._group_actions:
+ return fq_group_name, self.play._group_actions[fq_group_name]
+
+ try:
+ action_groups = _get_collection_metadata(collection_name).get('action_groups', {})
+ except ValueError:
+ if not mandatory:
+ display.vvvvv("Error loading module_defaults: could not resolve the module_defaults group %s" % fq_group_name)
+ return fq_group_name, []
+
+ raise AnsibleParserError("Error loading module_defaults: could not resolve the module_defaults group %s" % fq_group_name)
+
+ # The collection may or may not use the fully qualified name
+ # Don't fail if the group doesn't exist in the collection
+ resource_name = fq_group_name.split(collection_name + '.')[-1]
+ action_group = action_groups.get(
+ fq_group_name,
+ action_groups.get(resource_name)
+ )
+ if action_group is None:
+ if not mandatory:
+ display.vvvvv("Error loading module_defaults: could not resolve the module_defaults group %s" % fq_group_name)
+ return fq_group_name, []
+ raise AnsibleParserError("Error loading module_defaults: could not resolve the module_defaults group %s" % fq_group_name)
+
+ resolved_actions = []
+ include_groups = []
+
+ found_group_metadata = False
+ for action in action_group:
+ # Everything should be a string except the metadata entry
+ if not isinstance(action, string_types):
+ _validate_action_group_metadata(action, found_group_metadata, fq_group_name)
+
+ if isinstance(action['metadata'], dict):
+ found_group_metadata = True
+
+ include_groups = action['metadata'].get('extend_group', [])
+ if isinstance(include_groups, string_types):
+ include_groups = [include_groups]
+ if not isinstance(include_groups, list):
+                    # Bad entries may have triggered a warning above; prevent tracebacks by resetting to the acceptable type.
+ include_groups = []
+ continue
+
+ # The collection may or may not use the fully qualified name.
+ # If not, it's part of the current collection.
+ if not AnsibleCollectionRef.is_valid_fqcr(action):
+ action = collection_name + '.' + action
+ resolved_action = self._resolve_action(action, mandatory=False)
+ if resolved_action:
+ resolved_actions.append(resolved_action)
+
+ for action in resolved_actions:
+ if action not in self.play._action_groups:
+ self.play._action_groups[action] = []
+ self.play._action_groups[action].append(fq_group_name)
+
+ self.play._group_actions[fq_group_name] = resolved_actions
+
+ # Resolve extended groups last, after caching the group in case they recursively refer to each other
+ for include_group in include_groups:
+ if not AnsibleCollectionRef.is_valid_fqcr(include_group):
+ include_group = collection_name + '.' + include_group
+
+ dummy, group_actions = self._resolve_group(include_group, mandatory=False)
+
+ for action in group_actions:
+ if action not in self.play._action_groups:
+ self.play._action_groups[action] = []
+ self.play._action_groups[action].append(fq_group_name)
+
+ self.play._group_actions[fq_group_name].extend(group_actions)
+ resolved_actions.extend(group_actions)
+
+ return fq_group_name, resolved_actions
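+    # Shape of the metadata consumed above (editor's sketch; all names are
+    # hypothetical). A collection's meta/runtime.yml may define:
+    #     action_groups:
+    #       mygroup:
+    #         - metadata:
+    #             extend_group: [other.coll.their_group]
+    #         - my_module
+    # _resolve_group('ns.coll.mygroup') then returns the fully qualified
+    # actions of the group, including those pulled in via extend_group.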
+
+ def _resolve_action(self, action_name, mandatory=True):
+ context = module_loader.find_plugin_with_context(action_name)
+ if context.resolved and not context.action_plugin:
+ prefer = action_loader.find_plugin_with_context(action_name)
+ if prefer.resolved:
+ context = prefer
+ elif not context.resolved:
+ context = action_loader.find_plugin_with_context(action_name)
+
+ if context.resolved:
+ return context.resolved_fqcn
+ if mandatory:
+ raise AnsibleParserError("Could not resolve action %s in module_defaults" % action_name)
+ display.vvvvv("Could not resolve action %s in module_defaults" % action_name)
+
+ def squash(self):
+ '''
+ Evaluates all attributes and sets them to the evaluated version,
+ so that all future accesses of attributes do not need to evaluate
+ parent attributes.
+ '''
+ if not self._squashed:
+ for name in self.fattributes:
+ setattr(self, name, getattr(self, name))
+ self._squashed = True
+
+ def copy(self):
+ '''
+ Create a copy of this object and return it.
+ '''
+
+ try:
+ new_me = self.__class__()
+ except RuntimeError as e:
+ raise AnsibleError("Exceeded maximum object depth. This may have been caused by excessive role recursion", orig_exc=e)
+
+ for name in self.fattributes:
+ setattr(new_me, name, shallowcopy(getattr(self, f'_{name}', Sentinel)))
+
+ new_me._loader = self._loader
+ new_me._variable_manager = self._variable_manager
+ new_me._validated = self._validated
+ new_me._finalized = self._finalized
+ new_me._uuid = self._uuid
+
+ # if the ds value was set on the object, copy it to the new copy too
+ if hasattr(self, '_ds'):
+ new_me._ds = self._ds
+
+ return new_me
+
+ def get_validated_value(self, name, attribute, value, templar):
+ if attribute.isa == 'string':
+ value = to_text(value)
+ elif attribute.isa == 'int':
+ value = int(value)
+ elif attribute.isa == 'float':
+ value = float(value)
+ elif attribute.isa == 'bool':
+ value = boolean(value, strict=True)
+ elif attribute.isa == 'percent':
+ # special value, which may be an integer or float
+ # with an optional '%' at the end
+ if isinstance(value, string_types) and '%' in value:
+ value = value.replace('%', '')
+ value = float(value)
+ elif attribute.isa == 'list':
+ if value is None:
+ value = []
+ elif not isinstance(value, list):
+ value = [value]
+ if attribute.listof is not None:
+ for item in value:
+ if not isinstance(item, attribute.listof):
+ raise AnsibleParserError("the field '%s' should be a list of %s, "
+ "but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
+                    elif attribute.required and attribute.listof == string_types:
+                        if item is None or item.strip() == "":
+                            raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
+ elif attribute.isa == 'set':
+ if value is None:
+ value = set()
+ elif not isinstance(value, (list, set)):
+ if isinstance(value, string_types):
+ value = value.split(',')
+ else:
+ # Making a list like this handles strings of
+ # text and bytes properly
+ value = [value]
+ if not isinstance(value, set):
+ value = set(value)
+ elif attribute.isa == 'dict':
+ if value is None:
+ value = dict()
+ elif not isinstance(value, dict):
+ raise TypeError("%s is not a dictionary" % value)
+ elif attribute.isa == 'class':
+ if not isinstance(value, attribute.class_type):
+ raise TypeError("%s is not a valid %s (got a %s instead)" % (name, attribute.class_type, type(value)))
+ value.post_validate(templar=templar)
+ return value
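+    # Illustrative coercions performed above (editor's note, example values):
+    # isa='percent' turns "80%" into 80.0, isa='list' wraps a bare scalar as
+    # [value], and isa='set' splits the string "a,b" into {'a', 'b'}.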
+
+ def set_to_context(self, name):
+ ''' set to parent inherited value or Sentinel as appropriate'''
+
+ attribute = self.fattributes[name]
+ if isinstance(attribute, NonInheritableFieldAttribute):
+ # setting to sentinel will trigger 'default/default()' on getter
+ setattr(self, name, Sentinel)
+ else:
+ try:
+ setattr(self, name, self._get_parent_attribute(name, omit=True))
+ except AttributeError:
+ # mostly playcontext as only tasks/handlers/blocks really resolve parent
+ setattr(self, name, Sentinel)
+
+ def post_validate(self, templar):
+ '''
+        We can't tell that everything is of the right type until we have
+        all the variables. Run basic type validation (from isa) as well as
+        any _post_validate_<foo> functions.
+ '''
+
+ # save the omit value for later checking
+ omit_value = templar.available_variables.get('omit')
+
+ for (name, attribute) in self.fattributes.items():
+ if attribute.static:
+ value = getattr(self, name)
+
+                # we don't template 'vars', but allow templates as values for later use
+ if name not in ('vars',) and templar.is_template(value):
+ display.warning('"%s" is not templatable, but we found: %s, '
+ 'it will not be templated and will be used "as is".' % (name, value))
+ continue
+
+ if getattr(self, name) is None:
+ if not attribute.required:
+ continue
+ else:
+ raise AnsibleParserError("the field '%s' is required but was not set" % name)
+ elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
+ # Intermediate objects like Play() won't have their fields validated by
+ # default, as their values are often inherited by other objects and validated
+ # later, so we don't want them to fail out early
+ continue
+
+ try:
+ # Run the post-validator if present. These methods are responsible for
+ # using the given templar to template the values, if required.
+ method = getattr(self, '_post_validate_%s' % name, None)
+ if method:
+ value = method(attribute, getattr(self, name), templar)
+ elif attribute.isa == 'class':
+ value = getattr(self, name)
+ else:
+ # if the attribute contains a variable, template it now
+ value = templar.template(getattr(self, name))
+
+ # If this evaluated to the omit value, set the value back to inherited by context
+ # or default specified in the FieldAttribute and move on
+ if omit_value is not None and value == omit_value:
+ self.set_to_context(name)
+ continue
+
+ # and make sure the attribute is of the type it should be
+ if value is not None:
+ value = self.get_validated_value(name, attribute, value, templar)
+
+ # and assign the massaged value back to the attribute field
+ setattr(self, name, value)
+ except (TypeError, ValueError) as e:
+ value = getattr(self, name)
+ raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s."
+ "The error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds(), orig_exc=e)
+ except (AnsibleUndefinedVariable, UndefinedError) as e:
+ if templar._fail_on_undefined_errors and name != 'name':
+ if name == 'args':
+ msg = "The task includes an option with an undefined variable. The error was: %s" % (to_native(e))
+ else:
+ msg = "The field '%s' has an invalid value, which includes an undefined variable. The error was: %s" % (name, to_native(e))
+ raise AnsibleParserError(msg, obj=self.get_ds(), orig_exc=e)
+
+ self._finalized = True
+
+ def _load_vars(self, attr, ds):
+ '''
+ Vars in a play can be specified either as a dictionary directly, or
+        as a list of dictionaries. If the latter, this method will turn the
+ list into a single dictionary.
+ '''
+
+ def _validate_variable_keys(ds):
+ for key in ds:
+ if not isidentifier(key):
+ raise TypeError("'%s' is not a valid variable name" % key)
+
+ try:
+ if isinstance(ds, dict):
+ _validate_variable_keys(ds)
+ return combine_vars(self.vars, ds)
+ elif isinstance(ds, list):
+ all_vars = self.vars
+ for item in ds:
+ if not isinstance(item, dict):
+ raise ValueError
+ _validate_variable_keys(item)
+ all_vars = combine_vars(all_vars, item)
+ return all_vars
+ elif ds is None:
+ return {}
+ else:
+ raise ValueError
+ except ValueError as e:
+ raise AnsibleParserError("Vars in a %s must be specified as a dictionary, or a list of dictionaries" % self.__class__.__name__,
+ obj=ds, orig_exc=e)
+ except TypeError as e:
+ raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds, orig_exc=e)
+
+ def _extend_value(self, value, new_value, prepend=False):
+ '''
+        Will extend the value given with new_value (turning both into
+        lists if they are not already). Consecutive duplicates and None
+        entries are removed while preserving order.
+ '''
+
+ if not isinstance(value, list):
+ value = [value]
+ if not isinstance(new_value, list):
+ new_value = [new_value]
+
+ # Due to where _extend_value may run for some attributes
+ # it is possible to end up with Sentinel in the list of values
+ # ensure we strip them
+ value = [v for v in value if v is not Sentinel]
+ new_value = [v for v in new_value if v is not Sentinel]
+
+ if prepend:
+ combined = new_value + value
+ else:
+ combined = value + new_value
+
+ return [i for i, _ in itertools.groupby(combined) if i is not None]
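+    # Worked example (editor's note): _extend_value(['a', 'b'], ['b', 'c'])
+    # yields ['a', 'b', 'c'], while _extend_value(['a'], ['b', 'a']) keeps
+    # both 'a' entries -- itertools.groupby only collapses adjacent duplicates.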
+
+ def dump_attrs(self):
+ '''
+ Dumps all attributes to a dictionary
+ '''
+ attrs = {}
+ for (name, attribute) in self.fattributes.items():
+ attr = getattr(self, name)
+ if attribute.isa == 'class' and hasattr(attr, 'serialize'):
+ attrs[name] = attr.serialize()
+ else:
+ attrs[name] = attr
+ return attrs
+
+ def from_attrs(self, attrs):
+ '''
+ Loads attributes from a dictionary
+ '''
+ for (attr, value) in attrs.items():
+ if attr in self.fattributes:
+ attribute = self.fattributes[attr]
+ if attribute.isa == 'class' and isinstance(value, dict):
+ obj = attribute.class_type()
+ obj.deserialize(value)
+ setattr(self, attr, obj)
+ else:
+ setattr(self, attr, value)
+
+ # from_attrs is only used to create a finalized task
+ # from attrs from the Worker/TaskExecutor
+ # Those attrs are finalized and squashed in the TE
+ # and controller side use needs to reflect that
+ self._finalized = True
+ self._squashed = True
+
+ def serialize(self):
+ '''
+ Serializes the object derived from the base object into
+ a dictionary of values. This only serializes the field
+ attributes for the object, so this may need to be overridden
+ for any classes which wish to add additional items not stored
+ as field attributes.
+ '''
+
+ repr = self.dump_attrs()
+
+ # serialize the uuid field
+ repr['uuid'] = self._uuid
+ repr['finalized'] = self._finalized
+ repr['squashed'] = self._squashed
+
+ return repr
+
+ def deserialize(self, data):
+ '''
+ Given a dictionary of values, load up the field attributes for
+ this object. As with serialize(), if there are any non-field
+ attribute data members, this method will need to be overridden
+ and extended.
+ '''
+
+ if not isinstance(data, dict):
+ raise AnsibleAssertionError('data (%s) should be a dict but is a %s' % (data, type(data)))
+
+ for (name, attribute) in self.fattributes.items():
+ if name in data:
+ setattr(self, name, data[name])
+ else:
+ self.set_to_context(name)
+
+ # restore the UUID field
+ setattr(self, '_uuid', data.get('uuid'))
+ self._finalized = data.get('finalized', False)
+ self._squashed = data.get('squashed', False)
+
+
+class Base(FieldAttributeBase):
+
+ name = NonInheritableFieldAttribute(isa='string', default='', always_post_validate=True)
+
+ # connection/transport
+ connection = ConnectionFieldAttribute(isa='string', default=context.cliargs_deferred_get('connection'))
+ port = FieldAttribute(isa='int')
+ remote_user = FieldAttribute(isa='string', default=context.cliargs_deferred_get('remote_user'))
+
+ # variables
+ vars = NonInheritableFieldAttribute(isa='dict', priority=100, static=True)
+
+ # module default params
+ module_defaults = FieldAttribute(isa='list', extend=True, prepend=True)
+
+ # flags and misc. settings
+ environment = FieldAttribute(isa='list', extend=True, prepend=True)
+ no_log = FieldAttribute(isa='bool')
+ run_once = FieldAttribute(isa='bool')
+ ignore_errors = FieldAttribute(isa='bool')
+ ignore_unreachable = FieldAttribute(isa='bool')
+ check_mode = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('check'))
+ diff = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('diff'))
+ any_errors_fatal = FieldAttribute(isa='bool', default=C.ANY_ERRORS_FATAL)
+ throttle = FieldAttribute(isa='int', default=0)
+ timeout = FieldAttribute(isa='int', default=C.TASK_TIMEOUT)
+
+ # explicitly invoke a debugger on tasks
+ debugger = FieldAttribute(isa='string')
+
+ # Privilege escalation
+ become = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('become'))
+ become_method = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_method'))
+ become_user = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_user'))
+ become_flags = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_flags'))
+ become_exe = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_exe'))
+
+ # used to hold sudo/su stuff
+ DEPRECATED_ATTRIBUTES = [] # type: list[str]
+
+ def get_path(self):
+ ''' return the absolute path of the playbook object and its line number '''
+
+ path = ""
+ try:
+ path = "%s:%s" % (self._ds._data_source, self._ds._line_number)
+ except AttributeError:
+ try:
+ path = "%s:%s" % (self._parent._play._ds._data_source, self._parent._play._ds._line_number)
+ except AttributeError:
+ pass
+ return path
+
+ def get_dep_chain(self):
+
+ if hasattr(self, '_parent') and self._parent:
+ return self._parent.get_dep_chain()
+ else:
+ return None
+
+ def get_search_path(self):
+ '''
+ Return the list of paths you should search for files, in order.
+ This follows role/playbook dependency chain.
+ '''
+ path_stack = []
+
+ dep_chain = self.get_dep_chain()
+ # inside role: add the dependency chain from current to dependent
+ if dep_chain:
+ path_stack.extend(reversed([x._role_path for x in dep_chain if hasattr(x, '_role_path')]))
+
+ # add path of task itself, unless it is already in the list
+ task_dir = os.path.dirname(self.get_path())
+ if task_dir not in path_stack:
+ path_stack.append(task_dir)
+
+ return path_stack
diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py
new file mode 100644
index 0000000..fabaf7f
--- /dev/null
+++ b/lib/ansible/playbook/block.py
@@ -0,0 +1,446 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.constants as C
+from ansible.errors import AnsibleParserError
+from ansible.playbook.attribute import FieldAttribute, NonInheritableFieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.helpers import load_list_of_tasks
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+from ansible.utils.sentinel import Sentinel
+
+
+class Block(Base, Conditional, CollectionSearch, Taggable):
+
+ # main block fields containing the task lists
+ block = NonInheritableFieldAttribute(isa='list', default=list)
+ rescue = NonInheritableFieldAttribute(isa='list', default=list)
+ always = NonInheritableFieldAttribute(isa='list', default=list)
+
+ # other fields for task compat
+ notify = FieldAttribute(isa='list')
+ delegate_to = FieldAttribute(isa='string')
+ delegate_facts = FieldAttribute(isa='bool')
+
+ # for future consideration? this would be functionally
+ # similar to the 'else' clause for exceptions
+ # otherwise = FieldAttribute(isa='list')
+
+ def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, implicit=False):
+ self._play = play
+ self._role = role
+ self._parent = None
+ self._dep_chain = None
+ self._use_handlers = use_handlers
+ self._implicit = implicit
+
+ if task_include:
+ self._parent = task_include
+ elif parent_block:
+ self._parent = parent_block
+
+ super(Block, self).__init__()
+
+ def __repr__(self):
+ return "BLOCK(uuid=%s)(id=%s)(parent=%s)" % (self._uuid, id(self), self._parent)
+
+ def __eq__(self, other):
+ '''object comparison based on _uuid'''
+ return self._uuid == other._uuid
+
+ def __ne__(self, other):
+ '''object comparison based on _uuid'''
+ return self._uuid != other._uuid
+
+ def get_vars(self):
+ '''
+ Blocks do not store variables directly, however they may be a member
+ of a role or task include which does, so return those if present.
+ '''
+
+ all_vars = {}
+
+ if self._parent:
+ all_vars |= self._parent.get_vars()
+
+ all_vars |= self.vars.copy()
+
+ return all_vars
+
+ @staticmethod
+ def load(data, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
+ implicit = not Block.is_block(data)
+ b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers, implicit=implicit)
+ return b.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ @staticmethod
+ def is_block(ds):
+ is_block = False
+ if isinstance(ds, dict):
+ for attr in ('block', 'rescue', 'always'):
+ if attr in ds:
+ is_block = True
+ break
+ return is_block
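+    # e.g. (editor's illustration): is_block({'block': [...]}) is True, while
+    # a bare task dict such as {'command': 'uptime'} is not and gets wrapped
+    # into an implicit block by preprocess_data() below.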
+
+ def preprocess_data(self, ds):
+ '''
+ If a simple task is given, an implicit block for that single task
+ is created, which goes in the main portion of the block
+ '''
+
+ if not Block.is_block(ds):
+ if isinstance(ds, list):
+ return super(Block, self).preprocess_data(dict(block=ds))
+ else:
+ return super(Block, self).preprocess_data(dict(block=[ds]))
+
+ return super(Block, self).preprocess_data(ds)
+
+ def _load_block(self, attr, ds):
+ try:
+ return load_list_of_tasks(
+ ds,
+ play=self._play,
+ block=self,
+ role=self._role,
+ task_include=None,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ use_handlers=self._use_handlers,
+ )
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading a block", obj=self._ds, orig_exc=e)
+
+ def _load_rescue(self, attr, ds):
+ try:
+ return load_list_of_tasks(
+ ds,
+ play=self._play,
+ block=self,
+ role=self._role,
+ task_include=None,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ use_handlers=self._use_handlers,
+ )
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading rescue.", obj=self._ds, orig_exc=e)
+
+ def _load_always(self, attr, ds):
+ try:
+ return load_list_of_tasks(
+ ds,
+ play=self._play,
+ block=self,
+ role=self._role,
+ task_include=None,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ use_handlers=self._use_handlers,
+ )
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading always", obj=self._ds, orig_exc=e)
+
+ def _validate_always(self, attr, name, value):
+ if value and not self.block:
+ raise AnsibleParserError("'%s' keyword cannot be used without 'block'" % name, obj=self._ds)
+
+ _validate_rescue = _validate_always
+
+ def get_dep_chain(self):
+ if self._dep_chain is None:
+ if self._parent:
+ return self._parent.get_dep_chain()
+ else:
+ return None
+ else:
+ return self._dep_chain[:]
+
+ def copy(self, exclude_parent=False, exclude_tasks=False):
+ def _dupe_task_list(task_list, new_block):
+ new_task_list = []
+ for task in task_list:
+ new_task = task.copy(exclude_parent=True)
+ if task._parent:
+ new_task._parent = task._parent.copy(exclude_tasks=True)
+ if task._parent == new_block:
+ # If task._parent is the same as new_block, just replace it
+ new_task._parent = new_block
+ else:
+ # task may not be a direct child of new_block, search for the correct place to insert new_block
+ cur_obj = new_task._parent
+ while cur_obj._parent and cur_obj._parent != new_block:
+ cur_obj = cur_obj._parent
+
+ cur_obj._parent = new_block
+ else:
+ new_task._parent = new_block
+ new_task_list.append(new_task)
+ return new_task_list
+
+ new_me = super(Block, self).copy()
+ new_me._play = self._play
+ new_me._use_handlers = self._use_handlers
+
+ if self._dep_chain is not None:
+ new_me._dep_chain = self._dep_chain[:]
+
+ new_me._parent = None
+ if self._parent and not exclude_parent:
+ new_me._parent = self._parent.copy(exclude_tasks=True)
+
+ if not exclude_tasks:
+ new_me.block = _dupe_task_list(self.block or [], new_me)
+ new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
+ new_me.always = _dupe_task_list(self.always or [], new_me)
+
+ new_me._role = None
+ if self._role:
+ new_me._role = self._role
+
+ new_me.validate()
+ return new_me
+
+ def serialize(self):
+ '''
+        Override of the default serialize method, since when we're serializing
+        a block we don't want to include the task lists themselves.
+ '''
+
+ data = dict()
+ for attr in self.fattributes:
+ if attr not in ('block', 'rescue', 'always'):
+ data[attr] = getattr(self, attr)
+
+ data['dep_chain'] = self.get_dep_chain()
+
+ if self._role is not None:
+ data['role'] = self._role.serialize()
+ if self._parent is not None:
+ data['parent'] = self._parent.copy(exclude_tasks=True).serialize()
+ data['parent_type'] = self._parent.__class__.__name__
+
+ return data
+
+ def deserialize(self, data):
+ '''
+ Override of the default deserialize method, to match the above overridden
+ serialize method
+ '''
+
+ # import is here to avoid import loops
+ from ansible.playbook.task_include import TaskInclude
+ from ansible.playbook.handler_task_include import HandlerTaskInclude
+
+ # we don't want the full set of attributes (the task lists), as that
+ # would lead to a serialize/deserialize loop
+ for attr in self.fattributes:
+ if attr in data and attr not in ('block', 'rescue', 'always'):
+ setattr(self, attr, data.get(attr))
+
+ self._dep_chain = data.get('dep_chain', None)
+
+ # if there was a serialized role, unpack it too
+ role_data = data.get('role')
+ if role_data:
+ r = Role()
+ r.deserialize(role_data)
+ self._role = r
+
+ parent_data = data.get('parent')
+ if parent_data:
+ parent_type = data.get('parent_type')
+ if parent_type == 'Block':
+ p = Block()
+ elif parent_type == 'TaskInclude':
+ p = TaskInclude()
+ elif parent_type == 'HandlerTaskInclude':
+ p = HandlerTaskInclude()
+ p.deserialize(parent_data)
+ self._parent = p
+ self._dep_chain = self._parent.get_dep_chain()
+
+ def set_loader(self, loader):
+ self._loader = loader
+ if self._parent:
+ self._parent.set_loader(loader)
+ elif self._role:
+ self._role.set_loader(loader)
+
+ dep_chain = self.get_dep_chain()
+ if dep_chain:
+ for dep in dep_chain:
+ dep.set_loader(loader)
+
+ def _get_parent_attribute(self, attr, omit=False):
+ '''
+ Generic logic to get the attribute or parent attribute for a block value.
+ '''
+ fattr = self.fattributes[attr]
+
+ extend = fattr.extend
+ prepend = fattr.prepend
+
+ try:
+ # omit self, and only get parent values
+ if omit:
+ value = Sentinel
+ else:
+ value = getattr(self, f'_{attr}', Sentinel)
+
+ # If parent is static, we can grab attrs from the parent
+ # otherwise, defer to the grandparent
+ if getattr(self._parent, 'statically_loaded', True):
+ _parent = self._parent
+ else:
+ _parent = self._parent._parent
+
+ if _parent and (value is Sentinel or extend):
+ try:
+ if getattr(_parent, 'statically_loaded', True):
+ if hasattr(_parent, '_get_parent_attribute'):
+ parent_value = _parent._get_parent_attribute(attr)
+ else:
+ parent_value = getattr(_parent, f'_{attr}', Sentinel)
+ if extend:
+ value = self._extend_value(value, parent_value, prepend)
+ else:
+ value = parent_value
+ except AttributeError:
+ pass
+ if self._role and (value is Sentinel or extend):
+ try:
+ parent_value = getattr(self._role, f'_{attr}', Sentinel)
+ if extend:
+ value = self._extend_value(value, parent_value, prepend)
+ else:
+ value = parent_value
+
+ dep_chain = self.get_dep_chain()
+ if dep_chain and (value is Sentinel or extend):
+ dep_chain.reverse()
+ for dep in dep_chain:
+ dep_value = getattr(dep, f'_{attr}', Sentinel)
+ if extend:
+ value = self._extend_value(value, dep_value, prepend)
+ else:
+ value = dep_value
+
+ if value is not Sentinel and not extend:
+ break
+ except AttributeError:
+ pass
+ if self._play and (value is Sentinel or extend):
+ try:
+ play_value = getattr(self._play, f'_{attr}', Sentinel)
+ if play_value is not Sentinel:
+ if extend:
+ value = self._extend_value(value, play_value, prepend)
+ else:
+ value = play_value
+ except AttributeError:
+ pass
+ except KeyError:
+ pass
+
+ return value
+
+ def filter_tagged_tasks(self, all_vars):
+ '''
+ Creates a new block, with task lists filtered based on the tags.
+ '''
+
+ def evaluate_and_append_task(target):
+ tmp_list = []
+ for task in target:
+ if isinstance(task, Block):
+ filtered_block = evaluate_block(task)
+ if filtered_block.has_tasks():
+ tmp_list.append(filtered_block)
+ elif ((task.action in C._ACTION_META and task.implicit) or
+ (task.action in C._ACTION_INCLUDE and task.evaluate_tags([], self._play.skip_tags, all_vars=all_vars)) or
+ task.evaluate_tags(self._play.only_tags, self._play.skip_tags, all_vars=all_vars)):
+ tmp_list.append(task)
+ return tmp_list
+
+ def evaluate_block(block):
+ new_block = block.copy(exclude_parent=True, exclude_tasks=True)
+ new_block._parent = block._parent
+ new_block.block = evaluate_and_append_task(block.block)
+ new_block.rescue = evaluate_and_append_task(block.rescue)
+ new_block.always = evaluate_and_append_task(block.always)
+ return new_block
+
+ return evaluate_block(self)
+
+ def get_tasks(self):
+ def evaluate_and_append_task(target):
+ tmp_list = []
+ for task in target:
+ if isinstance(task, Block):
+ tmp_list.extend(evaluate_block(task))
+ else:
+ tmp_list.append(task)
+ return tmp_list
+
+ def evaluate_block(block):
+ rv = evaluate_and_append_task(block.block)
+ rv.extend(evaluate_and_append_task(block.rescue))
+ rv.extend(evaluate_and_append_task(block.always))
+ return rv
+
+ return evaluate_block(self)
+
+ def has_tasks(self):
+ return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
+
+ def get_include_params(self):
+ if self._parent:
+ return self._parent.get_include_params()
+ else:
+ return dict()
+
+ def all_parents_static(self):
+ '''
+ Determine if all of the parents of this block were statically loaded
+ or not. Since Task/TaskInclude objects may be in the chain, they simply
+ call their parents all_parents_static() method. Only Block objects in
+ the chain check the statically_loaded value of the parent.
+ '''
+ from ansible.playbook.task_include import TaskInclude
+ if self._parent:
+ if isinstance(self._parent, TaskInclude) and not self._parent.statically_loaded:
+ return False
+ return self._parent.all_parents_static()
+
+ return True
+
+ def get_first_parent_include(self):
+ from ansible.playbook.task_include import TaskInclude
+ if self._parent:
+ if isinstance(self._parent, TaskInclude):
+ return self._parent
+ return self._parent.get_first_parent_include()
+ return None
diff --git a/lib/ansible/playbook/collectionsearch.py b/lib/ansible/playbook/collectionsearch.py
new file mode 100644
index 0000000..2980093
--- /dev/null
+++ b/lib/ansible/playbook/collectionsearch.py
@@ -0,0 +1,63 @@
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+from ansible.playbook.attribute import FieldAttribute
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.template import is_template
+from ansible.utils.display import Display
+
+from jinja2.nativetypes import NativeEnvironment
+
+display = Display()
+
+
+def _ensure_default_collection(collection_list=None):
+ default_collection = AnsibleCollectionConfig.default_collection
+
+ # Will be None when used as the default
+ if collection_list is None:
+ collection_list = []
+
+ # FIXME: exclude role tasks?
+ if default_collection and default_collection not in collection_list:
+ collection_list.insert(0, default_collection)
+
+ # if there's something in the list, ensure that builtin or legacy is always there too
+ if collection_list and 'ansible.builtin' not in collection_list and 'ansible.legacy' not in collection_list:
+ collection_list.append('ansible.legacy')
+
+ return collection_list
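+# e.g. (editor's illustration): _ensure_default_collection(['my.coll'])
+# returns ['my.coll', 'ansible.legacy'] so unqualified plugin names still
+# resolve; an empty list stays empty unless a default collection is active.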
+
+
+class CollectionSearch:
+
+ # this needs to be populated before we can resolve tasks/roles/etc
+ collections = FieldAttribute(isa='list', listof=string_types, priority=100, default=_ensure_default_collection,
+ always_post_validate=True, static=True)
+
+ def _load_collections(self, attr, ds):
+ # We are always a mixin with Base, so we can validate this untemplated
+ # field early on to guarantee we are dealing with a list.
+ ds = self.get_validated_value('collections', self.fattributes.get('collections'), ds, None)
+
+ # this will only be called if someone specified a value; call the shared value
+ _ensure_default_collection(collection_list=ds)
+
+ if not ds: # don't return an empty collection list, just return None
+ return None
+
+ # This duplicates static attr checking logic from post_validate()
+ # because if the user attempts to template a collection name, it may
+ # error before it ever gets to the post_validate() warning (e.g. trying
+ # to import a role from the collection).
+ env = NativeEnvironment()
+ for collection_name in ds:
+ if is_template(collection_name, env):
+ display.warning('"collections" is not templatable, but we found: %s, '
+ 'it will not be templated and will be used "as is".' % (collection_name))
+
+ return ds
diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py
new file mode 100644
index 0000000..fe07358
--- /dev/null
+++ b/lib/ansible/playbook/conditional.py
@@ -0,0 +1,221 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import re
+
+from jinja2.compiler import generate
+from jinja2.exceptions import UndefinedError
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleUndefinedVariable
+from ansible.module_utils.six import text_type
+from ansible.module_utils._text import to_native, to_text
+from ansible.playbook.attribute import FieldAttribute
+from ansible.utils.display import Display
+
+display = Display()
+
+DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')
+LOOKUP_REGEX = re.compile(r'lookup\s*\(')
+VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
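+# e.g. (editor's illustration): DEFINED_REGEX matches "foo is defined" with
+# groups ('foo', 'is', 'defined'), and "hostvars['x'] is not undefined" with
+# groups ("hostvars['x']", 'is not', 'undefined').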
+
+
+class Conditional:
+
+ '''
+    This is a mix-in class, to be used with Base to allow the object
+    to be run conditionally: it runs when its conditions evaluate to True
+    and is skipped otherwise.
+ '''
+
+ when = FieldAttribute(isa='list', default=list, extend=True, prepend=True)
+
+ def __init__(self, loader=None):
+ # when used directly, this class needs a loader, but we want to
+ # make sure we don't trample on the existing one if this class
+ # is used as a mix-in with a playbook base class
+ if not hasattr(self, '_loader'):
+ if loader is None:
+ raise AnsibleError("a loader must be specified when using Conditional() directly")
+ else:
+ self._loader = loader
+ super(Conditional, self).__init__()
+
+ def _validate_when(self, attr, name, value):
+ if not isinstance(value, list):
+ setattr(self, name, [value])
+
+ def extract_defined_undefined(self, conditional):
+ results = []
+
+ cond = conditional
+ m = DEFINED_REGEX.search(cond)
+ while m:
+ results.append(m.groups())
+ cond = cond[m.end():]
+ m = DEFINED_REGEX.search(cond)
+
+ return results
+
+ def evaluate_conditional(self, templar, all_vars):
+ '''
+        Loops through the conditionals set on this object, returning
+        False if any of them evaluate to False.
+ '''
+
+ # since this is a mix-in, it may not have an underlying datastructure
+ # associated with it, so we pull it out now in case we need it for
+ # error reporting below
+ ds = None
+ if hasattr(self, '_ds'):
+ ds = getattr(self, '_ds')
+
+ result = True
+ try:
+ for conditional in self.when:
+
+ # do evaluation
+ if conditional is None or conditional == '':
+ res = True
+ elif isinstance(conditional, bool):
+ res = conditional
+ else:
+ res = self._check_conditional(conditional, templar, all_vars)
+
+ # only update if still true, preserve false
+ if result:
+ result = res
+
+ display.debug("Evaluated conditional (%s): %s" % (conditional, res))
+ if not result:
+ break
+
+ except Exception as e:
+ raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds)
+
+ return result
+
+ def _check_conditional(self, conditional, templar, all_vars):
+ '''
+ This method does the low-level evaluation of each conditional
+ set on this object, using jinja2 to wrap the conditionals for
+ evaluation.
+ '''
+
+ original = conditional
+
+ if templar.is_template(conditional):
+ display.warning('conditional statements should not include jinja2 '
+ 'templating delimiters such as {{ }} or {%% %%}. '
+ 'Found: %s' % conditional)
+
+ # make sure the templar is using the variables specified with this method
+ templar.available_variables = all_vars
+
+ try:
+ # if the conditional is "unsafe", disable lookups
+ disable_lookups = hasattr(conditional, '__UNSAFE__')
+ conditional = templar.template(conditional, disable_lookups=disable_lookups)
+
+ if not isinstance(conditional, text_type) or conditional == "":
+ return conditional
+
+ # update the lookups flag, as the string returned above may now be unsafe
+ # and we don't want future templating calls to do unsafe things
+ disable_lookups |= hasattr(conditional, '__UNSAFE__')
+
+ # First, we do some low-level jinja2 parsing involving the AST format of the
+ # statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
+ class CleansingNodeVisitor(ast.NodeVisitor):
+ def generic_visit(self, node, inside_call=False, inside_yield=False):
+ if isinstance(node, ast.Call):
+ inside_call = True
+ elif isinstance(node, ast.Yield):
+ inside_yield = True
+ elif isinstance(node, ast.Str):
+ if disable_lookups:
+ if inside_call and node.s.startswith("__"):
+ # calling things with a dunder is generally bad at this point...
+ raise AnsibleError(
+ "Invalid access found in the conditional: '%s'" % conditional
+ )
+ elif inside_yield:
+ # we're inside a yield, so recursively parse and traverse the AST
+ # of the result to catch forbidden syntax from executing
+ parsed = ast.parse(node.s, mode='exec')
+ cnv = CleansingNodeVisitor()
+ cnv.visit(parsed)
+ # iterate over all child nodes
+ for child_node in ast.iter_child_nodes(node):
+ self.generic_visit(
+ child_node,
+ inside_call=inside_call,
+ inside_yield=inside_yield
+ )
+ try:
+ res = templar.environment.parse(conditional, None, None)
+ res = generate(res, templar.environment, None, None)
+ parsed = ast.parse(res, mode='exec')
+
+ cnv = CleansingNodeVisitor()
+ cnv.visit(parsed)
+ except Exception as e:
+ raise AnsibleError("Invalid conditional detected: %s" % to_native(e))
+
+ # and finally we generate and template the presented string and look at the resulting string
+ # NOTE The spaces around True and False are intentional to short-circuit literal_eval for
+ # jinja2_native=False and avoid its expensive calls.
+ presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
+ val = templar.template(presented, disable_lookups=disable_lookups).strip()
+ if val == "True":
+ return True
+ elif val == "False":
+ return False
+ else:
+ raise AnsibleError("unable to evaluate conditional: %s" % original)
+ except (AnsibleUndefinedVariable, UndefinedError) as e:
+ # the templating failed, meaning most likely a variable was undefined. If we happened
+ # to be looking for an undefined variable, return True, otherwise fail
+ try:
+ # first we extract the variable name from the error message
+ var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
+ # next we extract all defined/undefined tests from the conditional string
+ def_undef = self.extract_defined_undefined(conditional)
+ # then we loop through these, comparing the error variable name against
+ # each def/undef test we found above. If there is a match, we determine
+ # whether the logic/state mean the variable should exist or not and return
+ # the corresponding True/False
+ for (du_var, logic, state) in def_undef:
+ # when we compare the var names, normalize quotes because something
+ # like hostvars['foo'] may be tested against hostvars["foo"]
+ if var_name.replace("'", '"') == du_var.replace("'", '"'):
+                    # whether the variable should exist is an XOR between a negation
+                    # in the logic portion ('not') and the state (defined or undefined)
+ should_exist = ('not' in logic) != (state == 'defined')
+ if should_exist:
+ return False
+ else:
+ return True
+ # as nothing above matched the failed var name, re-raise here to
+ # trigger the AnsibleUndefinedVariable exception again below
+ raise
+ except Exception:
+ raise AnsibleUndefinedVariable("error while evaluating conditional (%s): %s" % (original, e))
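+# e.g. (editor's illustration): the conditional "foo == 'bar'" is evaluated by
+# rendering "{% if foo == 'bar' %} True {% else %} False {% endif %}" and
+# comparing the stripped result against the strings "True" and "False".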
diff --git a/lib/ansible/playbook/handler.py b/lib/ansible/playbook/handler.py
new file mode 100644
index 0000000..675eecb
--- /dev/null
+++ b/lib/ansible/playbook/handler.py
@@ -0,0 +1,62 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.task import Task
+from ansible.module_utils.six import string_types
+
+
+class Handler(Task):
+
+ listen = FieldAttribute(isa='list', default=list, listof=string_types, static=True)
+
+ def __init__(self, block=None, role=None, task_include=None):
+ self.notified_hosts = []
+
+ self.cached_name = False
+
+ super(Handler, self).__init__(block=block, role=role, task_include=task_include)
+
+ def __repr__(self):
+ ''' returns a human readable representation of the handler '''
+ return "HANDLER: %s" % self.get_name()
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ t = Handler(block=block, role=role, task_include=task_include)
+ return t.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def notify_host(self, host):
+ if not self.is_host_notified(host):
+ self.notified_hosts.append(host)
+ return True
+ return False
+
+ def remove_host(self, host):
+ self.notified_hosts = [h for h in self.notified_hosts if h != host]
+
+ def is_host_notified(self, host):
+ return host in self.notified_hosts
+
+ def serialize(self):
+ result = super(Handler, self).serialize()
+ result['is_handler'] = True
+ return result
diff --git a/lib/ansible/playbook/handler_task_include.py b/lib/ansible/playbook/handler_task_include.py
new file mode 100644
index 0000000..1c779f8
--- /dev/null
+++ b/lib/ansible/playbook/handler_task_include.py
@@ -0,0 +1,39 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# from ansible.inventory.host import Host
+from ansible.playbook.handler import Handler
+from ansible.playbook.task_include import TaskInclude
+
+
+class HandlerTaskInclude(Handler, TaskInclude):
+
+ VALID_INCLUDE_KEYWORDS = TaskInclude.VALID_INCLUDE_KEYWORDS.union(('listen',))
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ t = HandlerTaskInclude(block=block, role=role, task_include=task_include)
+ handler = t.check_options(
+ t.load_data(data, variable_manager=variable_manager, loader=loader),
+ data
+ )
+
+ return handler
diff --git a/lib/ansible/playbook/helpers.py b/lib/ansible/playbook/helpers.py
new file mode 100644
index 0000000..38e32ef
--- /dev/null
+++ b/lib/ansible/playbook/helpers.py
@@ -0,0 +1,353 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound, AnsibleAssertionError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import string_types
+from ansible.parsing.mod_args import ModuleArgsParser
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
+ '''
+ Given a list of mixed task/block data (parsed from YAML),
+ return a list of Block() objects, where implicit blocks
+ are created for each bare Task.
+ '''
+
+ # we import here to prevent a circular dependency with imports
+ from ansible.playbook.block import Block
+
+ if not isinstance(ds, (list, type(None))):
+ raise AnsibleAssertionError('%s should be a list or None but is %s' % (ds, type(ds)))
+
+ block_list = []
+ if ds:
+ count = iter(range(len(ds)))
+ for i in count:
+ block_ds = ds[i]
+ # Implicit blocks are created by bare tasks listed in a play without
+ # an explicit block statement. If we have two implicit blocks in a row,
+ # squash them down to a single block to save processing time later.
+ implicit_blocks = []
+ while block_ds is not None and not Block.is_block(block_ds):
+ implicit_blocks.append(block_ds)
+ i += 1
+ # Advance the iterator, so we don't repeat
+ next(count, None)
+ try:
+ block_ds = ds[i]
+ except IndexError:
+ block_ds = None
+
+ # Loop over both the accumulated implicit blocks and block_ds, which is the next explicit block in the list
+ for b in (implicit_blocks, block_ds):
+ if b:
+ block_list.append(
+ Block.load(
+ b,
+ play=play,
+ parent_block=parent_block,
+ role=role,
+ task_include=task_include,
+ use_handlers=use_handlers,
+ variable_manager=variable_manager,
+ loader=loader,
+ )
+ )
+
+ return block_list
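+
+
+# Illustrative sketch of the implicit-block squashing above (assumes `play`,
+# `vm` and `loader` are a loaded Play, VariableManager and DataLoader):
+#
+#   ds = [
+#       {'debug': {'msg': 'one'}},   # bare task, squashed into the implicit Block
+#       {'debug': {'msg': 'two'}},   # bare task, squashed into the same Block
+#       {'block': [{'ping': {}}]},   # explicit Block
+#   ]
+#   blocks = load_list_of_blocks(ds, play=play, variable_manager=vm, loader=loader)
+#   # len(blocks) == 2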
+
+
+def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
+ '''
+ Given a list of task datastructures (parsed from YAML),
+ return a list of Task() or TaskInclude() objects.
+ '''
+
+ # we import here to prevent a circular dependency with imports
+ from ansible.playbook.block import Block
+ from ansible.playbook.handler import Handler
+ from ansible.playbook.task import Task
+ from ansible.playbook.task_include import TaskInclude
+ from ansible.playbook.role_include import IncludeRole
+ from ansible.playbook.handler_task_include import HandlerTaskInclude
+ from ansible.template import Templar
+ from ansible.utils.plugin_docs import get_versioned_doclink
+
+ if not isinstance(ds, list):
+ raise AnsibleAssertionError('The ds (%s) should be a list but was a %s' % (ds, type(ds)))
+
+ task_list = []
+ for task_ds in ds:
+ if not isinstance(task_ds, dict):
+ raise AnsibleAssertionError('The task entry (%s) should be a dict but was a %s' % (task_ds, type(task_ds)))
+
+ if 'block' in task_ds:
+ if use_handlers:
+ raise AnsibleParserError("Using a block as a handler is not supported.", obj=task_ds)
+ t = Block.load(
+ task_ds,
+ play=play,
+ parent_block=block,
+ role=role,
+ task_include=task_include,
+ use_handlers=use_handlers,
+ variable_manager=variable_manager,
+ loader=loader,
+ )
+ task_list.append(t)
+ else:
+ args_parser = ModuleArgsParser(task_ds)
+ try:
+ (action, args, delegate_to) = args_parser.parse(skip_action_validation=True)
+ except AnsibleParserError as e:
+ # if the raised exception was created with obj=ds args, then it includes the detail,
+ # so we don't need to add it and can simply re-raise.
+ if e.obj:
+ raise
+ # But if it wasn't, we can add the yaml object now to get more detail
+ raise AnsibleParserError(to_native(e), obj=task_ds, orig_exc=e)
+
+ if action in C._ACTION_ALL_INCLUDE_IMPORT_TASKS:
+
+ if use_handlers:
+ include_class = HandlerTaskInclude
+ else:
+ include_class = TaskInclude
+
+ t = include_class.load(
+ task_ds,
+ block=block,
+ role=role,
+ task_include=None,
+ variable_manager=variable_manager,
+ loader=loader
+ )
+
+ all_vars = variable_manager.get_vars(play=play, task=t)
+ templar = Templar(loader=loader, variables=all_vars)
+
+ # check to see if this include is dynamic or static:
+ # 1. include_tasks is always dynamic and import_tasks is always static
+ # 2. the legacy 'include' action falls back to a heuristic below
+ if action in C._ACTION_INCLUDE_TASKS:
+ is_static = False
+ elif action in C._ACTION_IMPORT_TASKS:
+ is_static = True
+ else:
+ include_link = get_versioned_doclink('user_guide/playbooks_reuse_includes.html')
+ display.deprecated('"include" is deprecated, use include_tasks/import_tasks instead. See %s for details' % include_link, "2.16")
+ is_static = not templar.is_template(t.args['_raw_params']) and t.all_parents_static() and not t.loop
+
+ if is_static:
+ if t.loop is not None:
+ if action in C._ACTION_IMPORT_TASKS:
+ raise AnsibleParserError("You cannot use loops on 'import_tasks' statements. You should use 'include_tasks' instead.", obj=task_ds)
+ else:
+ raise AnsibleParserError("You cannot use 'static' on an include with a loop", obj=task_ds)
+
+ # we set a flag to indicate this include was static
+ t.statically_loaded = True
+
+ # handle relative includes by walking up the list of parent include
+ # tasks and checking the relative result to see if it exists
+ parent_include = block
+ cumulative_path = None
+
+ found = False
+ subdir = 'tasks'
+ if use_handlers:
+ subdir = 'handlers'
+ while parent_include is not None:
+ if not isinstance(parent_include, TaskInclude):
+ parent_include = parent_include._parent
+ continue
+ try:
+ parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
+ except AnsibleUndefinedVariable as e:
+ if not parent_include.statically_loaded:
+ raise AnsibleParserError(
+ "Error when evaluating variable in dynamic parent include path: %s. "
+ "When using static imports, the parent dynamic include cannot utilize host facts "
+ "or variables from inventory" % parent_include.args.get('_raw_params'),
+ obj=task_ds,
+ suppress_extended_error=True,
+ orig_exc=e
+ )
+ raise
+ if cumulative_path is None:
+ cumulative_path = parent_include_dir
+ elif not os.path.isabs(cumulative_path):
+ cumulative_path = os.path.join(parent_include_dir, cumulative_path)
+ include_target = templar.template(t.args['_raw_params'])
+ if t._role:
+ new_basedir = os.path.join(t._role._role_path, subdir, cumulative_path)
+ include_file = loader.path_dwim_relative(new_basedir, subdir, include_target)
+ else:
+ include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
+
+ if os.path.exists(include_file):
+ found = True
+ break
+ else:
+ parent_include = parent_include._parent
+
+ if not found:
+ try:
+ include_target = templar.template(t.args['_raw_params'])
+ except AnsibleUndefinedVariable as e:
+ raise AnsibleParserError(
+ "Error when evaluating variable in import path: %s.\n\n"
+ "When using static imports, ensure that any variables used in their names are defined in vars/vars_files\n"
+ "or extra-vars passed in from the command line. Static imports cannot use variables from facts or inventory\n"
+ "sources like group or host vars." % t.args['_raw_params'],
+ obj=task_ds,
+ suppress_extended_error=True,
+ orig_exc=e)
+ if t._role:
+ include_file = loader.path_dwim_relative(t._role._role_path, subdir, include_target)
+ else:
+ include_file = loader.path_dwim(include_target)
+
+ data = loader.load_from_file(include_file)
+ if not data:
+ display.warning('file %s is empty and had no tasks to include' % include_file)
+ continue
+ elif not isinstance(data, list):
+ raise AnsibleParserError("included task files must contain a list of tasks", obj=data)
+
+ # since we can't send callbacks here, we display a message directly in
+ # the same fashion used by the on_include callback. We also do it here,
+ # because the recursive nature of helper methods means we may be loading
+ # nested includes, and we want the include order printed correctly
+ display.vv("statically imported: %s" % include_file)
+
+ ti_copy = t.copy(exclude_parent=True)
+ ti_copy._parent = block
+ included_blocks = load_list_of_blocks(
+ data,
+ play=play,
+ parent_block=None,
+ task_include=ti_copy,
+ role=role,
+ use_handlers=use_handlers,
+ loader=loader,
+ variable_manager=variable_manager,
+ )
+
+ tags = ti_copy.tags[:]
+
+ # now we extend the tags on each of the included blocks
+ for b in included_blocks:
+ b.tags = list(set(b.tags).union(tags))
+
+ # FIXME: handlers shouldn't need this special handling, but do
+ # right now because they don't iterate blocks correctly
+ if use_handlers:
+ for b in included_blocks:
+ task_list.extend(b.block)
+ else:
+ task_list.extend(included_blocks)
+ else:
+ t.is_static = False
+ task_list.append(t)
+
+ elif action in C._ACTION_ALL_PROPER_INCLUDE_IMPORT_ROLES:
+ if use_handlers:
+ raise AnsibleParserError(f"Using '{action}' as a handler is not supported.", obj=task_ds)
+
+ ir = IncludeRole.load(
+ task_ds,
+ block=block,
+ role=role,
+ task_include=None,
+ variable_manager=variable_manager,
+ loader=loader,
+ )
+
+ # include_role is dynamic by default, while import_role is always static
+ is_static = action in C._ACTION_IMPORT_ROLE
+
+ if is_static:
+ if ir.loop is not None:
+ if action in C._ACTION_IMPORT_ROLE:
+ raise AnsibleParserError("You cannot use loops on 'import_role' statements. You should use 'include_role' instead.", obj=task_ds)
+ else:
+ raise AnsibleParserError("You cannot use 'static' on an include_role with a loop", obj=task_ds)
+
+ # we set a flag to indicate this include was static
+ ir.statically_loaded = True
+
+ # template the role name now, if needed
+ all_vars = variable_manager.get_vars(play=play, task=ir)
+ templar = Templar(loader=loader, variables=all_vars)
+ ir._role_name = templar.template(ir._role_name)
+
+ # uses compiled list from object
+ blocks, _ = ir.get_block_list(variable_manager=variable_manager, loader=loader)
+ task_list.extend(blocks)
+ else:
+ # passes the task object itself for later generation of the list
+ task_list.append(ir)
+ else:
+ if use_handlers:
+ t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
+ else:
+ t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
+
+ task_list.append(t)
+
+ return task_list
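+
+
+# Illustrative sketch of the dispatch above (assumed data; `play`, `vm` and
+# `loader` as in load_list_of_blocks): with use_handlers=True bare entries
+# become Handler objects and include entries route through HandlerTaskInclude:
+#
+#   ds = [
+#       {'name': 'restart svc', 'command': 'systemctl restart svc'},
+#       {'include_tasks': 'more_handlers.yml'},
+#   ]
+#   handlers = load_list_of_tasks(ds, play=play, use_handlers=True,
+#                                 variable_manager=vm, loader=loader)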
+
+
+def load_list_of_roles(ds, play, current_role_path=None, variable_manager=None, loader=None, collection_search_list=None):
+ """
+ Loads and returns a list of RoleInclude objects from the ds list of role definitions
+ :param ds: list of roles to load
+ :param play: calling Play object
+ :param current_role_path: path of the owning role, if any
+ :param variable_manager: varmgr to use for templating
+ :param loader: loader to use for DS parsing/services
+ :param collection_search_list: list of collections to search for unqualified role names
+ :return:
+ """
+ # we import here to prevent a circular dependency with imports
+ from ansible.playbook.role.include import RoleInclude
+
+ if not isinstance(ds, list):
+ raise AnsibleAssertionError('ds (%s) should be a list but was a %s' % (ds, type(ds)))
+
+ roles = []
+ for role_def in ds:
+ i = RoleInclude.load(role_def, play=play, current_role_path=current_role_path, variable_manager=variable_manager,
+ loader=loader, collection_list=collection_search_list)
+ roles.append(i)
+
+ return roles
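+
+
+# Illustrative sketch (assumed shapes): role definitions may be bare names or
+# parameterized mappings; each entry becomes one RoleInclude:
+#
+#   ds = ['common', {'role': 'web', 'vars': {'http_port': 8080}}]
+#   role_includes = load_list_of_roles(ds, play=play, variable_manager=vm, loader=loader)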
diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py
new file mode 100644
index 0000000..409eaec
--- /dev/null
+++ b/lib/ansible/playbook/included_file.py
@@ -0,0 +1,223 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.executor.task_executor import remove_omit
+from ansible.module_utils._text import to_text
+from ansible.playbook.handler import Handler
+from ansible.playbook.task_include import TaskInclude
+from ansible.playbook.role_include import IncludeRole
+from ansible.template import Templar
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class IncludedFile:
+
+ def __init__(self, filename, args, vars, task, is_role=False):
+ self._filename = filename
+ self._args = args
+ self._vars = vars
+ self._task = task
+ self._hosts = []
+ self._is_role = is_role
+ self._results = []
+
+ def add_host(self, host):
+ if host not in self._hosts:
+ self._hosts.append(host)
+ return
+ raise ValueError('host %s has already been added to this included file' % host)
+
+ def __eq__(self, other):
+ return (other._filename == self._filename and
+ other._args == self._args and
+ other._vars == self._vars and
+ other._task._uuid == self._task._uuid and
+ other._task._parent._uuid == self._task._parent._uuid)
+
+ def __repr__(self):
+ return "%s (args=%s vars=%s): %s" % (self._filename, self._args, self._vars, self._hosts)
+
+ @staticmethod
+ def process_include_results(results, iterator, loader, variable_manager):
+ included_files = []
+ task_vars_cache = {}
+
+ for res in results:
+
+ original_host = res._host
+ original_task = res._task
+
+ if original_task.action in C._ACTION_ALL_INCLUDES:
+ if original_task.action in C._ACTION_INCLUDE:
+ display.deprecated('"include" is deprecated, use include_tasks/import_tasks/import_playbook instead', "2.16")
+
+ if original_task.loop:
+ if 'results' not in res._result:
+ continue
+ include_results = res._result['results']
+ else:
+ include_results = [res._result]
+
+ for include_result in include_results:
+ # if the task result was skipped or failed, continue
+ if ('skipped' in include_result and include_result['skipped']) or ('failed' in include_result and include_result['failed']):
+ continue
+
+ cache_key = (iterator._play, original_host, original_task)
+ try:
+ task_vars = task_vars_cache[cache_key]
+ except KeyError:
+ task_vars = task_vars_cache[cache_key] = variable_manager.get_vars(play=iterator._play, host=original_host, task=original_task)
+
+ include_args = include_result.get('include_args', dict())
+ special_vars = {}
+ loop_var = include_result.get('ansible_loop_var', 'item')
+ index_var = include_result.get('ansible_index_var')
+ if loop_var in include_result:
+ task_vars[loop_var] = special_vars[loop_var] = include_result[loop_var]
+ if index_var and index_var in include_result:
+ task_vars[index_var] = special_vars[index_var] = include_result[index_var]
+ if '_ansible_item_label' in include_result:
+ task_vars['_ansible_item_label'] = special_vars['_ansible_item_label'] = include_result['_ansible_item_label']
+ if 'ansible_loop' in include_result:
+ task_vars['ansible_loop'] = special_vars['ansible_loop'] = include_result['ansible_loop']
+ if original_task.no_log and '_ansible_no_log' not in include_args:
+ task_vars['_ansible_no_log'] = special_vars['_ansible_no_log'] = original_task.no_log
+
+ # get search path for this task to pass to lookup plugins that may be used in pathing to
+ # the included file
+ task_vars['ansible_search_path'] = original_task.get_search_path()
+
+ # ensure basedir is always in (dwim already searches here but we need to display it)
+ if loader.get_basedir() not in task_vars['ansible_search_path']:
+ task_vars['ansible_search_path'].append(loader.get_basedir())
+
+ templar = Templar(loader=loader, variables=task_vars)
+
+ if original_task.action in C._ACTION_ALL_INCLUDE_TASKS:
+ include_file = None
+
+ if original_task._parent:
+ # handle relative includes by walking up the list of parent include
+ # tasks and checking the relative result to see if it exists
+ parent_include = original_task._parent
+ cumulative_path = None
+ while parent_include is not None:
+ if not isinstance(parent_include, TaskInclude):
+ parent_include = parent_include._parent
+ continue
+ if isinstance(parent_include, IncludeRole):
+ parent_include_dir = parent_include._role_path
+ else:
+ try:
+ parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
+ except AnsibleError as e:
+ parent_include_dir = ''
+ display.warning(
+ 'Templating the path of the parent %s failed. The path to the '
+ 'included file may not be found. '
+ 'The error was: %s.' % (original_task.action, to_text(e))
+ )
+ if cumulative_path is not None and not os.path.isabs(cumulative_path):
+ cumulative_path = os.path.join(parent_include_dir, cumulative_path)
+ else:
+ cumulative_path = parent_include_dir
+ include_target = templar.template(include_result['include'])
+ if original_task._role:
+ new_basedir = os.path.join(original_task._role._role_path, 'tasks', cumulative_path)
+ candidates = [loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_target),
+ loader.path_dwim_relative(new_basedir, 'tasks', include_target)]
+ for include_file in candidates:
+ try:
+ # may throw OSError
+ os.stat(include_file)
+ # or select the task file if it exists
+ break
+ except OSError:
+ pass
+ else:
+ include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
+
+ if os.path.exists(include_file):
+ break
+ else:
+ parent_include = parent_include._parent
+
+ if include_file is None:
+ if original_task._role:
+ include_target = templar.template(include_result['include'])
+ include_file = loader.path_dwim_relative(
+ original_task._role._role_path,
+ 'handlers' if isinstance(original_task, Handler) else 'tasks',
+ include_target,
+ is_role=True)
+ else:
+ include_file = loader.path_dwim(include_result['include'])
+
+ include_file = templar.template(include_file)
+ inc_file = IncludedFile(include_file, include_args, special_vars, original_task)
+ else:
+ # template the included role's name here
+ role_name = include_args.pop('name', include_args.pop('role', None))
+ if role_name is not None:
+ role_name = templar.template(role_name)
+
+ new_task = original_task.copy()
+ new_task._role_name = role_name
+ for from_arg in new_task.FROM_ARGS:
+ if from_arg in include_args:
+ from_key = from_arg.removesuffix('_from')
+ new_task._from_files[from_key] = templar.template(include_args.pop(from_arg))
+
+ omit_token = task_vars.get('omit')
+ if omit_token:
+ new_task._from_files = remove_omit(new_task._from_files, omit_token)
+
+ inc_file = IncludedFile(role_name, include_args, special_vars, new_task, is_role=True)
+
+ idx = 0
+ orig_inc_file = inc_file
+ while True:
+ try:
+ pos = included_files[idx:].index(orig_inc_file)
+ # pos is relative to idx since we are slicing
+ # use idx + pos due to relative indexing
+ inc_file = included_files[idx + pos]
+ except ValueError:
+ included_files.append(orig_inc_file)
+ inc_file = orig_inc_file
+
+ try:
+ inc_file.add_host(original_host)
+ inc_file._results.append(res)
+ except ValueError:
+ # The host already exists for this include, advance forward, this is a new include
+ idx += pos + 1
+ else:
+ break
+
+ return included_files
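+
+
+# Illustrative sketch of the de-duplication above (assumes `task` is a loaded
+# TaskInclude with a parent, and `h1`/`h2` are inventory Hosts): identical
+# filename/args/vars/task tuples collapse into one IncludedFile carrying all
+# notified hosts.
+#
+#   a = IncludedFile('tasks.yml', {}, {}, task)
+#   b = IncludedFile('tasks.yml', {}, {}, task)
+#   a == b               # True: equality ignores the host list
+#   a.add_host(h1)
+#   a.add_host(h2)
+#   a.add_host(h1)       # raises ValueError: h1 was already recorded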
diff --git a/lib/ansible/playbook/loop_control.py b/lib/ansible/playbook/loop_control.py
new file mode 100644
index 0000000..2f56166
--- /dev/null
+++ b/lib/ansible/playbook/loop_control.py
@@ -0,0 +1,41 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import FieldAttributeBase
+
+
+class LoopControl(FieldAttributeBase):
+
+ loop_var = FieldAttribute(isa='str', default='item', always_post_validate=True)
+ index_var = FieldAttribute(isa='str', always_post_validate=True)
+ label = FieldAttribute(isa='str')
+ pause = FieldAttribute(isa='float', default=0, always_post_validate=True)
+ extended = FieldAttribute(isa='bool', always_post_validate=True)
+ extended_allitems = FieldAttribute(isa='bool', default=True, always_post_validate=True)
+
+ def __init__(self):
+ super(LoopControl, self).__init__()
+
+ @staticmethod
+ def load(data, variable_manager=None, loader=None):
+ t = LoopControl()
+ return t.load_data(data, variable_manager=variable_manager, loader=loader)
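+
+
+# Illustrative sketch (assumed YAML): the attributes above map 1:1 onto the
+# `loop_control` keys of a task, e.g.
+#
+#   - command: /bin/true
+#     loop: ['a', 'b']
+#     loop_control:
+#       loop_var: server
+#       index_var: idx
+#       pause: 0.5
+#       label: '{{ server }}'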
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
new file mode 100644
index 0000000..23bb36b
--- /dev/null
+++ b/lib/ansible/playbook/play.py
@@ -0,0 +1,401 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible import context
+from ansible.errors import AnsibleParserError, AnsibleAssertionError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.six import binary_type, string_types, text_type
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.block import Block
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
+from ansible.playbook.role import Role
+from ansible.playbook.task import Task
+from ansible.playbook.taggable import Taggable
+from ansible.vars.manager import preprocess_vars
+from ansible.utils.display import Display
+
+display = Display()
+
+
+__all__ = ['Play']
+
+
+class Play(Base, Taggable, CollectionSearch):
+
+ """
+ A play is a language feature that represents a list of roles and/or
+ task/handler blocks to execute on a given set of hosts.
+
+ Usage:
+
+ Play.load(datastructure) -> Play
+ Play.something(...)
+ """
+
+ # =================================================================================
+ hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True, priority=-2)
+
+ # Facts
+ gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
+
+ # defaults to be deprecated, should be 'None' in future
+ gather_subset = FieldAttribute(isa='list', default=(lambda: C.DEFAULT_GATHER_SUBSET), listof=string_types, always_post_validate=True)
+ gather_timeout = FieldAttribute(isa='int', default=C.DEFAULT_GATHER_TIMEOUT, always_post_validate=True)
+ fact_path = FieldAttribute(isa='string', default=C.DEFAULT_FACT_PATH)
+
+ # Variable Attributes
+ vars_files = FieldAttribute(isa='list', default=list, priority=99)
+ vars_prompt = FieldAttribute(isa='list', default=list, always_post_validate=False)
+
+ # Role Attributes
+ roles = FieldAttribute(isa='list', default=list, priority=90)
+
+ # Block (Task) Lists Attributes
+ handlers = FieldAttribute(isa='list', default=list, priority=-1)
+ pre_tasks = FieldAttribute(isa='list', default=list, priority=-1)
+ post_tasks = FieldAttribute(isa='list', default=list, priority=-1)
+ tasks = FieldAttribute(isa='list', default=list, priority=-1)
+
+ # Flag/Setting Attributes
+ force_handlers = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('force_handlers'), always_post_validate=True)
+ max_fail_percentage = FieldAttribute(isa='percent', always_post_validate=True)
+ serial = FieldAttribute(isa='list', default=list, always_post_validate=True)
+ strategy = FieldAttribute(isa='string', default=C.DEFAULT_STRATEGY, always_post_validate=True)
+ order = FieldAttribute(isa='string', always_post_validate=True)
+
+ # =================================================================================
+
+ def __init__(self):
+ super(Play, self).__init__()
+
+ self._included_conditional = None
+ self._included_path = None
+ self._removed_hosts = []
+ self.ROLE_CACHE = {}
+
+ self.only_tags = set(context.CLIARGS.get('tags', [])) or frozenset(('all',))
+ self.skip_tags = set(context.CLIARGS.get('skip_tags', []))
+
+ self._action_groups = {}
+ self._group_actions = {}
+
+ def __repr__(self):
+ return self.get_name()
+
+ def _validate_hosts(self, attribute, name, value):
+ # Only validate 'hosts' if a value was passed in to original data set.
+ if 'hosts' in self._ds:
+ if not value:
+ raise AnsibleParserError("Hosts list cannot be empty. Please check your playbook")
+
+ if is_sequence(value):
+ # Make sure each item in the sequence is a valid string
+ for entry in value:
+ if entry is None:
+ raise AnsibleParserError("Hosts list cannot contain values of 'None'. Please check your playbook")
+ elif not isinstance(entry, (binary_type, text_type)):
+ raise AnsibleParserError("Hosts list contains an invalid host value: '{host!s}'".format(host=entry))
+
+ elif not isinstance(value, (binary_type, text_type)):
+ raise AnsibleParserError("Hosts list must be a sequence or string. Please check your playbook.")
+
+ def get_name(self):
+ ''' return the name of the Play '''
+ if self.name:
+ return self.name
+
+ if is_sequence(self.hosts):
+ self.name = ','.join(self.hosts)
+ else:
+ self.name = self.hosts or ''
+
+ return self.name
+
+ @staticmethod
+ def load(data, variable_manager=None, loader=None, vars=None):
+ p = Play()
+ if vars:
+ p.vars = vars.copy()
+ return p.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def preprocess_data(self, ds):
+ '''
+ Adjusts play datastructure to clean up old/legacy items
+ '''
+
+ if not isinstance(ds, dict):
+ raise AnsibleAssertionError('while preprocessing data (%s), ds should be a dict but was a %s' % (ds, type(ds)))
+
+ # The use of 'user' in the Play datastructure was deprecated to
+ # line up with the same change for Tasks, due to the fact that
+ # 'user' conflicted with the user module.
+ if 'user' in ds:
+ # this should never happen, but error out with a helpful message
+ # to the user if it does...
+ if 'remote_user' in ds:
+ raise AnsibleParserError("both 'user' and 'remote_user' are set for this play. "
+ "The use of 'user' is deprecated, and should be removed", obj=ds)
+
+ ds['remote_user'] = ds['user']
+ del ds['user']
+
+ return super(Play, self).preprocess_data(ds)
+
+ def _load_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ try:
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading tasks: %s" % to_native(e), obj=self._ds, orig_exc=e)
+
+ def _load_pre_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ try:
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading pre_tasks", obj=self._ds, orig_exc=e)
+
+ def _load_post_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ try:
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading post_tasks", obj=self._ds, orig_exc=e)
+
+ def _load_handlers(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed handlers/blocks.
+ Bare handlers outside of a block are given an implicit block.
+ '''
+ try:
+ return self._extend_value(
+ self.handlers,
+ load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader),
+ prepend=True
+ )
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading handlers", obj=self._ds, orig_exc=e)
+
+ def _load_roles(self, attr, ds):
+ '''
+ Loads and returns a list of RoleInclude objects from the datastructure
+ list of role definitions and creates the Role from those objects
+ '''
+
+ if ds is None:
+ ds = []
+
+ try:
+ role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager,
+ loader=self._loader, collection_search_list=self.collections)
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds, orig_exc=e)
+
+ roles = []
+ for ri in role_includes:
+ roles.append(Role.load(ri, play=self))
+
+ self.roles[:0] = roles
+
+ return self.roles
+
+ def _load_vars_prompt(self, attr, ds):
+ new_ds = preprocess_vars(ds)
+ vars_prompts = []
+ if new_ds is not None:
+ for prompt_data in new_ds:
+ if 'name' not in prompt_data:
+ raise AnsibleParserError("Invalid vars_prompt data structure, missing 'name' key", obj=ds)
+ for key in prompt_data:
+ if key not in ('name', 'prompt', 'default', 'private', 'confirm', 'encrypt', 'salt_size', 'salt', 'unsafe'):
+ raise AnsibleParserError("Invalid vars_prompt data structure, found unsupported key '%s'" % key, obj=ds)
+ vars_prompts.append(prompt_data)
+ return vars_prompts
+
+ def _compile_roles(self):
+ '''
+ Handles the role compilation step, returning a flat list of tasks
+ with the lowest level dependencies first. For example, if a role R
+ has a dependency D1, which also has a dependency D2, the tasks from
+ D2 are merged first, followed by those from D1, and finally by the tasks
+ from the parent role R itself. This is done for all roles in the Play.
+ '''
+
+ block_list = []
+
+ if len(self.roles) > 0:
+ for r in self.roles:
+ # Don't insert tasks from ``import/include_role``, preventing
+ # duplicate execution at the wrong time
+ if r.from_include:
+ continue
+ block_list.extend(r.compile(play=self))
+
+ return block_list
+
+ def compile_roles_handlers(self):
+ '''
+ Handles the role handler compilation step, returning a flat list of Handlers
+ This is done for all roles in the Play.
+ '''
+
+ block_list = []
+
+ if len(self.roles) > 0:
+ for r in self.roles:
+ if r.from_include:
+ continue
+ block_list.extend(r.get_handler_blocks(play=self))
+
+ return block_list
+
+ def compile(self):
+ '''
+ Compiles and returns the task list for this play, compiled from the
+ roles (which are themselves compiled recursively) and/or the list of
+ tasks specified in the play.
+ '''
+
+ # create a block containing a single flush handlers meta
+ # task, so we can be sure to run handlers at certain points
+ # of the playbook execution
+ flush_block = Block.load(
+ data={'meta': 'flush_handlers'},
+ play=self,
+ variable_manager=self._variable_manager,
+ loader=self._loader
+ )
+
+ for task in flush_block.block:
+ task.implicit = True
+
+ block_list = []
+ if self.force_handlers:
+ noop_task = Task()
+ noop_task.action = 'meta'
+ noop_task.args['_raw_params'] = 'noop'
+ noop_task.implicit = True
+ noop_task.set_loader(self._loader)
+
+ b = Block(play=self)
+ b.block = self.pre_tasks or [noop_task]
+ b.always = [flush_block]
+ block_list.append(b)
+
+ tasks = self._compile_roles() + self.tasks
+ b = Block(play=self)
+ b.block = tasks or [noop_task]
+ b.always = [flush_block]
+ block_list.append(b)
+
+ b = Block(play=self)
+ b.block = self.post_tasks or [noop_task]
+ b.always = [flush_block]
+ block_list.append(b)
+
+ return block_list
+
+ block_list.extend(self.pre_tasks)
+ block_list.append(flush_block)
+ block_list.extend(self._compile_roles())
+ block_list.extend(self.tasks)
+ block_list.append(flush_block)
+ block_list.extend(self.post_tasks)
+ block_list.append(flush_block)
+
+ return block_list
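+
+ # Illustrative sketch of compile() output on the non-force_handlers path:
+ # [*pre_tasks, flush, *role_tasks, *tasks, flush, *post_tasks, flush],
+ # where each flush is the implicit 'meta: flush_handlers' block built above.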
+
+ def get_vars(self):
+ return self.vars.copy()
+
+ def get_vars_files(self):
+ if self.vars_files is None:
+ return []
+ elif not isinstance(self.vars_files, list):
+ return [self.vars_files]
+ return self.vars_files
+
+ def get_handlers(self):
+ return self.handlers[:]
+
+ def get_roles(self):
+ return self.roles[:]
+
+ def get_tasks(self):
+ tasklist = []
+ for task in self.pre_tasks + self.tasks + self.post_tasks:
+ if isinstance(task, Block):
+ tasklist.append(task.block + task.rescue + task.always)
+ else:
+ tasklist.append(task)
+ return tasklist
+
+ def serialize(self):
+ data = super(Play, self).serialize()
+
+ roles = []
+ for role in self.get_roles():
+ roles.append(role.serialize())
+ data['roles'] = roles
+ data['included_path'] = self._included_path
+ data['action_groups'] = self._action_groups
+ data['group_actions'] = self._group_actions
+
+ return data
+
+ def deserialize(self, data):
+ super(Play, self).deserialize(data)
+
+ self._included_path = data.get('included_path', None)
+ self._action_groups = data.get('action_groups', {})
+ self._group_actions = data.get('group_actions', {})
+ if 'roles' in data:
+ role_data = data.get('roles', [])
+ roles = []
+ for role in role_data:
+ r = Role()
+ r.deserialize(role)
+ roles.append(r)
+
+ setattr(self, 'roles', roles)
+ del data['roles']
+
+ def copy(self):
+ new_me = super(Play, self).copy()
+ new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
+ new_me._included_conditional = self._included_conditional
+ new_me._included_path = self._included_path
+ new_me._action_groups = self._action_groups
+ new_me._group_actions = self._group_actions
+ return new_me
diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py
new file mode 100644
index 0000000..90de929
--- /dev/null
+++ b/lib/ansible/playbook/play_context.py
@@ -0,0 +1,354 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible import context
+from ansible.module_utils.compat.paramiko import paramiko
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.utils.display import Display
+from ansible.utils.ssh_functions import check_for_controlpersist
+
+
+display = Display()
+
+
+__all__ = ['PlayContext']
+
+
+TASK_ATTRIBUTE_OVERRIDES = (
+ 'become',
+ 'become_user',
+ 'become_pass',
+ 'become_method',
+ 'become_flags',
+ 'connection',
+ 'docker_extra_args', # TODO: remove
+ 'delegate_to',
+ 'no_log',
+ 'remote_user',
+)
+
+RESET_VARS = (
+ 'ansible_connection',
+ 'ansible_user',
+ 'ansible_host',
+ 'ansible_port',
+
+ # TODO: ???
+ 'ansible_docker_extra_args',
+ 'ansible_ssh_host',
+ 'ansible_ssh_pass',
+ 'ansible_ssh_port',
+ 'ansible_ssh_user',
+ 'ansible_ssh_private_key_file',
+ 'ansible_ssh_pipelining',
+ 'ansible_ssh_executable',
+)
+
+
+class PlayContext(Base):
+
+ '''
+ This class is used to consolidate the connection information for
+ hosts in a play and child tasks, where the task may override some
+ connection/authentication information.
+ '''
+
+ # base
+ module_compression = FieldAttribute(isa='string', default=C.DEFAULT_MODULE_COMPRESSION)
+ shell = FieldAttribute(isa='string')
+ executable = FieldAttribute(isa='string', default=C.DEFAULT_EXECUTABLE)
+
+ # connection fields, some are inherited from Base:
+ # (connection, port, remote_user, environment, no_log)
+ remote_addr = FieldAttribute(isa='string')
+ password = FieldAttribute(isa='string')
+ timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
+ connection_user = FieldAttribute(isa='string')
+ private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
+ pipelining = FieldAttribute(isa='bool', default=C.ANSIBLE_PIPELINING)
+
+ # networking modules
+ network_os = FieldAttribute(isa='string')
+
+ # docker FIXME: remove these
+ docker_extra_args = FieldAttribute(isa='string')
+
+ # ???
+ connection_lockfd = FieldAttribute(isa='int')
+
+ # privilege escalation fields
+ become = FieldAttribute(isa='bool')
+ become_method = FieldAttribute(isa='string')
+ become_user = FieldAttribute(isa='string')
+ become_pass = FieldAttribute(isa='string')
+ become_exe = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_EXE)
+ become_flags = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_FLAGS)
+ prompt = FieldAttribute(isa='string')
+
+ # general flags
+ only_tags = FieldAttribute(isa='set', default=set)
+ skip_tags = FieldAttribute(isa='set', default=set)
+
+ start_at_task = FieldAttribute(isa='string')
+ step = FieldAttribute(isa='bool', default=False)
+
+ # "PlayContext.force_handlers should not be used, the calling code should be using play itself instead"
+ force_handlers = FieldAttribute(isa='bool', default=False)
+
+ @property
+ def verbosity(self):
+ display.deprecated(
+ "PlayContext.verbosity is deprecated, use ansible.utils.display.Display.verbosity instead.",
+ version="2.18"
+ )
+ return self._internal_verbosity
+
+ @verbosity.setter
+ def verbosity(self, value):
+ display.deprecated(
+ "PlayContext.verbosity is deprecated, use ansible.utils.display.Display.verbosity instead.",
+ version="2.18"
+ )
+ self._internal_verbosity = value
+
+ def __init__(self, play=None, passwords=None, connection_lockfd=None):
+ # Note: play is really not optional. The only time it could be omitted is when we create
+ # a PlayContext just so we can invoke its deserialize method to load it from a serialized
+ # data source.
+
+ super(PlayContext, self).__init__()
+
+ if passwords is None:
+ passwords = {}
+
+ self.password = passwords.get('conn_pass', '')
+ self.become_pass = passwords.get('become_pass', '')
+
+ self._become_plugin = None
+
+ self.prompt = ''
+ self.success_key = ''
+
+ # a file descriptor to be used during locking operations
+ self.connection_lockfd = connection_lockfd
+
+ # set options before play to allow play to override them
+ if context.CLIARGS:
+ self.set_attributes_from_cli()
+ else:
+ self._internal_verbosity = 0
+
+ if play:
+ self.set_attributes_from_play(play)
+
+ def set_attributes_from_plugin(self, plugin):
+ # generic derived from connection plugin, temporary for backwards compat, in the end we should not set play_context properties
+
+ # get options for plugins
+ options = C.config.get_configuration_definitions(plugin.plugin_type, plugin._load_name)
+ for option in options:
+ if option:
+ flag = options[option].get('name')
+ if flag:
+ setattr(self, flag, plugin.get_option(flag))
+
+ def set_attributes_from_play(self, play):
+ self.force_handlers = play.force_handlers
+
+ def set_attributes_from_cli(self):
+ '''
+ Configures this connection information instance with data from
+ options specified by the user on the command line. These have a
+ lower precedence than those set on the play or host.
+ '''
+ if context.CLIARGS.get('timeout', False):
+ self.timeout = int(context.CLIARGS['timeout'])
+
+ # From the command line. These should probably be used directly by plugins instead
+ # For now, they are likely to be moved to FieldAttribute defaults
+ self.private_key_file = context.CLIARGS.get('private_key_file') # Else default
+ self._internal_verbosity = context.CLIARGS.get('verbosity') # Else default
+
+ # Not every cli that uses PlayContext has these command line args so have a default
+ self.start_at_task = context.CLIARGS.get('start_at_task', None) # Else default
+
+ def set_task_and_variable_override(self, task, variables, templar):
+ '''
+ Sets attributes from the task if they are set, which will override
+ those from the play.
+
+ :arg task: the task object with the parameters that were set on it
+ :arg variables: variables from inventory
+ :arg templar: templar instance if templating variables is needed
+ '''
+
+ new_info = self.copy()
+
+ # loop through a subset of attributes on the task object and set
+ # connection fields based on their values
+ for attr in TASK_ATTRIBUTE_OVERRIDES:
+ if (attr_val := getattr(task, attr, None)) is not None:
+ setattr(new_info, attr, attr_val)
+
+ # next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
+ # connection info object with 'magic' variables from the variable list.
+ # If the value 'ansible_delegated_vars' is in the variables, it means
+ # we have a delegated-to host, so we check there first before looking
+ # at the variables in general
+ if task.delegate_to is not None:
+ # In the case of a loop, the delegated_to host may have been
+ # templated based on the loop variable, so we try and locate
+ # the host name in the delegated variable dictionary here
+ delegated_host_name = templar.template(task.delegate_to)
+ delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())
+
+ delegated_transport = C.DEFAULT_TRANSPORT
+ for transport_var in C.MAGIC_VARIABLE_MAPPING.get('connection'):
+ if transport_var in delegated_vars:
+ delegated_transport = delegated_vars[transport_var]
+ break
+
+ # make sure this delegated_to host has something set for its remote
+ # address, otherwise we default to connecting to it by name. This
+ # may happen when users put an IP entry into their inventory, or if
+ # they rely on DNS for a non-inventory hostname
+ for address_var in ('ansible_%s_host' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('remote_addr'):
+ if address_var in delegated_vars:
+ break
+ else:
+ display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % delegated_host_name)
+ delegated_vars['ansible_host'] = delegated_host_name
+
+ # reset the port back to the default if none was specified, to prevent
+ # the delegated host from inheriting the original host's setting
+ for port_var in ('ansible_%s_port' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('port'):
+ if port_var in delegated_vars:
+ break
+ else:
+ if delegated_transport == 'winrm':
+ delegated_vars['ansible_port'] = 5986
+ else:
+ delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT
+
+ # and likewise for the remote user
+ for user_var in ('ansible_%s_user' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('remote_user'):
+ if user_var in delegated_vars and delegated_vars[user_var]:
+ break
+ else:
+ delegated_vars['ansible_user'] = task.remote_user or self.remote_user
+ else:
+ delegated_vars = dict()
+
+ # setup shell
+ for exe_var in C.MAGIC_VARIABLE_MAPPING.get('executable'):
+ if exe_var in variables:
+ setattr(new_info, 'executable', variables.get(exe_var))
+
+ attrs_considered = []
+ for (attr, variable_names) in C.MAGIC_VARIABLE_MAPPING.items():
+ for variable_name in variable_names:
+ if attr in attrs_considered:
+ continue
+ # for a delegation task, ONLY use the delegated host's vars and ignore the vars of the host the task is delegated for
+ if task.delegate_to is not None:
+ if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
+ setattr(new_info, attr, delegated_vars[variable_name])
+ attrs_considered.append(attr)
+ elif variable_name in variables:
+ setattr(new_info, attr, variables[variable_name])
+ attrs_considered.append(attr)
+ # no else, as no other vars should be considered
+
+ # become legacy updates -- from inventory file (inventory overrides
+ # commandline)
+ for become_pass_name in C.MAGIC_VARIABLE_MAPPING.get('become_pass'):
+ if become_pass_name in variables:
+ break
+
+ # make sure we get port defaults if needed
+ if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
+ new_info.port = int(C.DEFAULT_REMOTE_PORT)
+
+ # special overrides for the connection setting
+ if len(delegated_vars) > 0:
+ # in the event that we were using local before make sure to reset the
+ # connection type to the default transport for the delegated-to host,
+ # if not otherwise specified
+ for connection_type in C.MAGIC_VARIABLE_MAPPING.get('connection'):
+ if connection_type in delegated_vars:
+ break
+ else:
+ remote_addr_local = new_info.remote_addr in C.LOCALHOST
+ inv_hostname_local = delegated_vars.get('inventory_hostname') in C.LOCALHOST
+ if remote_addr_local and inv_hostname_local:
+ setattr(new_info, 'connection', 'local')
+ elif getattr(new_info, 'connection', None) == 'local' and (not remote_addr_local or not inv_hostname_local):
+ setattr(new_info, 'connection', C.DEFAULT_TRANSPORT)
+
+ # we store the original in 'connection_user' for use by network/other modules that fall back to it as the login user;
+ # connection_user is to be deprecated once connection=local is removed, as local resets remote_user
+ if new_info.connection == 'local':
+ if not new_info.connection_user:
+ new_info.connection_user = new_info.remote_user
+
+ # for the case in which a connection plugin still uses pc.remote_addr and its own options
+ # specify 'default: inventory_hostname', but the value was never added to vars:
+ if new_info.remote_addr == 'inventory_hostname':
+ new_info.remote_addr = variables.get('inventory_hostname')
+ display.warning('The "%s" connection plugin has an improperly configured remote target value, '
+ 'forcing "inventory_hostname" templated value instead of the string' % new_info.connection)
+
+ # set no_log to default if it was not previously set
+ if new_info.no_log is None:
+ new_info.no_log = C.DEFAULT_NO_LOG
+
+ if task.check_mode is not None:
+ new_info.check_mode = task.check_mode
+
+ if task.diff is not None:
+ new_info.diff = task.diff
+
+ return new_info
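+
+ # Illustrative sketch (assumes `task`, `task_vars` and `templar` are a loaded
+ # Task, its variables and a Templar): the override returns a copy and never
+ # mutates the original context:
+ #
+ #   new_pc = play_context.set_task_and_variable_override(task, task_vars, templar)
+ #   assert new_pc is not play_context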
+
+ def set_become_plugin(self, plugin):
+ self._become_plugin = plugin
+
+ def update_vars(self, variables):
+ '''
+ Adds 'magic' variables relating to connections to the variable dictionary provided.
+ This is a legacy from runner, kept in case users need to access these from the play.
+ '''
+
+ for prop, var_list in C.MAGIC_VARIABLE_MAPPING.items():
+ try:
+ if 'become' in prop:
+ continue
+
+ var_val = getattr(self, prop)
+ for var_opt in var_list:
+ if var_opt not in variables and var_val is not None:
+ variables[var_opt] = var_val
+ except AttributeError:
+ continue
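+
+
+# Illustrative sketch: update_vars() only fills gaps, so pre-existing
+# variables always win:
+#
+#   variables = {'ansible_user': 'deploy'}
+#   play_context.update_vars(variables)
+#   variables['ansible_user']   # still 'deploy'; unset magic vars were added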
diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py
new file mode 100644
index 0000000..03210ea
--- /dev/null
+++ b/lib/ansible/playbook/playbook_include.py
@@ -0,0 +1,171 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import ansible.constants as C
+from ansible.errors import AnsibleParserError, AnsibleAssertionError
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.six import string_types
+from ansible.parsing.splitter import split_args, parse_kv
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.taggable import Taggable
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
+from ansible.template import Templar
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class PlaybookInclude(Base, Conditional, Taggable):
+
+ import_playbook = FieldAttribute(isa='string')
+ vars_val = FieldAttribute(isa='dict', default=dict, alias='vars')
+
+ @staticmethod
+ def load(data, basedir, variable_manager=None, loader=None):
+ return PlaybookInclude().load_data(ds=data, basedir=basedir, variable_manager=variable_manager, loader=loader)
+
+ def load_data(self, ds, basedir, variable_manager=None, loader=None):
+ '''
+ Overrides the base load_data(), as we're actually going to return a new
+ Playbook() object rather than a PlaybookInclude object
+ '''
+
+ # import here to avoid a dependency loop
+ from ansible.playbook import Playbook
+ from ansible.playbook.play import Play
+
+ # first, we use the original parent method to correctly load the object
+ # via the load_data/preprocess_data system we normally use for other
+ # playbook objects
+ new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
+
+ all_vars = self.vars.copy()
+ if variable_manager:
+ all_vars |= variable_manager.get_vars()
+
+ templar = Templar(loader=loader, variables=all_vars)
+
+ # then we use the object to load a Playbook
+ pb = Playbook(loader=loader)
+
+ file_name = templar.template(new_obj.import_playbook)
+
+ # check for FQCN
+ resource = _get_collection_playbook_path(file_name)
+ if resource is not None:
+ playbook = resource[1]
+ playbook_collection = resource[2]
+ else:
+ # not FQCN try path
+ playbook = file_name
+ if not os.path.isabs(playbook):
+ playbook = os.path.join(basedir, playbook)
+
+ # might still be collection playbook
+ playbook_collection = _get_collection_name_from_path(playbook)
+
+ if playbook_collection:
+ # it is a collection playbook, setup default collections
+ AnsibleCollectionConfig.default_collection = playbook_collection
+ else:
+ # it is NOT a collection playbook, setup adjacent paths
+ AnsibleCollectionConfig.playbook_paths.append(os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict'))))
+
+ pb._load_playbook_data(file_name=playbook, variable_manager=variable_manager, vars=self.vars.copy())
+
+ # finally, update each loaded playbook entry with any variables specified
+ # on the included playbook and/or any tags which may have been set
+ for entry in pb._entries:
+
+ # conditional includes on a playbook need a marker to skip gathering
+ if new_obj.when and isinstance(entry, Play):
+ entry._included_conditional = new_obj.when[:]
+
+ temp_vars = entry.vars | new_obj.vars
+ param_tags = temp_vars.pop('tags', None)
+ if param_tags is not None:
+ entry.tags.extend(param_tags.split(','))
+ entry.vars = temp_vars
+ entry.tags = list(set(entry.tags).union(new_obj.tags))
+ if entry._included_path is None:
+ entry._included_path = os.path.dirname(playbook)
+
+ # Check to see if we need to forward the conditionals on to the included
+ # plays. If so, we can take a shortcut here and simply prepend them to
+ # those attached to each block (if any)
+ if new_obj.when:
+ for task_block in (entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks):
+ task_block._when = new_obj.when[:] + task_block.when[:]
+
+ return pb
+
+ def preprocess_data(self, ds):
+ '''
+ Reorganizes the data for a PlaybookInclude datastructure to line
+ up with what we expect the proper attributes to be
+ '''
+
+ if not isinstance(ds, dict):
+ raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
+
+ # the new, cleaned datastructure, which will have legacy
+ # items reduced to a standard structure
+ new_ds = AnsibleMapping()
+ if isinstance(ds, AnsibleBaseYAMLObject):
+ new_ds.ansible_pos = ds.ansible_pos
+
+ for (k, v) in ds.items():
+ if k in C._ACTION_IMPORT_PLAYBOOK:
+ self._preprocess_import(ds, new_ds, k, v)
+ else:
+ # some basic error checking, to make sure vars are properly
+ # formatted and do not conflict with k=v parameters
+ if k == 'vars':
+ if 'vars' in new_ds:
+ raise AnsibleParserError("import_playbook parameters cannot be mixed with 'vars' entries for import statements", obj=ds)
+ elif not isinstance(v, dict):
+ raise AnsibleParserError("vars for import_playbook statements must be specified as a dictionary", obj=ds)
+ new_ds[k] = v
+
+ return super(PlaybookInclude, self).preprocess_data(new_ds)
+
+ def _preprocess_import(self, ds, new_ds, k, v):
+ '''
+ Splits the playbook import line up into filename and parameters
+ '''
+ if v is None:
+ raise AnsibleParserError("playbook import parameter is missing", obj=ds)
+ elif not isinstance(v, string_types):
+ raise AnsibleParserError("playbook import parameter must be a string indicating a file path, got %s instead" % type(v), obj=ds)
+
+ # The import_playbook line must include at least one item, which is the filename
+ # to import. Anything after that should be regarded as a parameter to the import
+ items = split_args(v)
+ if len(items) == 0:
+ raise AnsibleParserError("import_playbook statements must specify the file name to import", obj=ds)
+
+ new_ds['import_playbook'] = items[0].strip()
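+
+ # Illustrative sketch: for {'import_playbook': 'site.yml'} the value is
+ # tokenized by split_args() and the first token is kept as the file name,
+ # so new_ds['import_playbook'] == 'site.yml'.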
diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py
new file mode 100644
index 0000000..0409609
--- /dev/null
+++ b/lib/ansible/playbook/role/__init__.py
@@ -0,0 +1,664 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from collections.abc import Container, Mapping, Set, Sequence
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleAssertionError
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import binary_type, text_type
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.helpers import load_list_of_blocks
+from ansible.playbook.role.metadata import RoleMetadata
+from ansible.playbook.taggable import Taggable
+from ansible.plugins.loader import add_all_plugin_dirs
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.path import is_subpath
+from ansible.utils.sentinel import Sentinel
+from ansible.utils.vars import combine_vars
+
+__all__ = ['Role', 'hash_params']
+
+# TODO: this should be a utility function, but can't be a member of
+# the role because it would require the use of self in a static
+# method. This is also used in the base class for strategies
+# (ansible/plugins/strategy/__init__.py)
+
+
+def hash_params(params):
+ """
+ Construct a data structure of parameters that is hashable.
+
+ This requires changing any mutable data structures into immutable ones.
+ We chose a frozenset because role parameters have to be unique.
+
+ .. warning:: this does not handle unhashable scalars. Two things
+ mitigate that limitation:
+
+ 1) There shouldn't be any unhashable scalars specified in the yaml
+ 2) Our only choice would be to return an error anyway.
+ """
+ # Any container is unhashable if it contains unhashable items (for
+ # instance, tuple() is a Hashable subclass but if it contains a dict, it
+ # cannot be hashed)
+ if isinstance(params, Container) and not isinstance(params, (text_type, binary_type)):
+ if isinstance(params, Mapping):
+ try:
+ # Optimistically hope the contents are all hashable
+ new_params = frozenset(params.items())
+ except TypeError:
+ new_params = set()
+ for k, v in params.items():
+ # Hash each entry individually
+ new_params.add((k, hash_params(v)))
+ new_params = frozenset(new_params)
+
+ elif isinstance(params, (Set, Sequence)):
+ try:
+ # Optimistically hope the contents are all hashable
+ new_params = frozenset(params)
+ except TypeError:
+ new_params = set()
+ for v in params:
+ # Hash each entry individually
+ new_params.add(hash_params(v))
+ new_params = frozenset(new_params)
+ else:
+ # This is just a guess.
+ new_params = frozenset(params)
+ return new_params
+
+ # Note: We do not handle unhashable scalars but our only choice would be
+ # to raise an error there anyway.
+ return frozenset((params,))
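A small demonstration of `hash_params` in action, assuming ansible-core is importable; the parameter values are made up. Nested mutable structures collapse into frozensets, so two structurally equal parameter dicts produce the same cache key regardless of insertion order:

```python
# Demonstration of hash_params producing a hashable cache key
# (hypothetical parameters; assumes ansible-core is installed).
from ansible.playbook.role import hash_params

params = {'port': 8080, 'users': ['alice', 'bob'], 'opts': {'retries': 3}}
key = hash_params(params)

# The result is a frozenset, so it can serve as a dict key, e.g. for
# play.ROLE_CACHE lookups:
cache = {}
cache[key] = object()  # stand-in for a Role() instance

# Equal params hash to the same key, whatever the dict ordering:
assert hash_params({'users': ['alice', 'bob'], 'opts': {'retries': 3}, 'port': 8080}) == key
```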
+
+
+class Role(Base, Conditional, Taggable, CollectionSearch):
+
+ delegate_to = FieldAttribute(isa='string')
+ delegate_facts = FieldAttribute(isa='bool')
+
+ def __init__(self, play=None, from_files=None, from_include=False, validate=True):
+ self._role_name = None
+ self._role_path = None
+ self._role_collection = None
+ self._role_params = dict()
+ self._loader = None
+
+ self._metadata = None
+ self._play = play
+ self._parents = []
+ self._dependencies = []
+ self._task_blocks = []
+ self._handler_blocks = []
+ self._compiled_handler_blocks = None
+ self._default_vars = dict()
+ self._role_vars = dict()
+ self._had_task_run = dict()
+ self._completed = dict()
+ self._should_validate = validate
+
+ if from_files is None:
+ from_files = {}
+ self._from_files = from_files
+
+ # Indicates whether this role was included via include/import_role
+ self.from_include = from_include
+
+ super(Role, self).__init__()
+
+ def __repr__(self):
+ return self.get_name()
+
+ def get_name(self, include_role_fqcn=True):
+ if include_role_fqcn:
+ return '.'.join(x for x in (self._role_collection, self._role_name) if x)
+ return self._role_name
+
+ @staticmethod
+ def load(role_include, play, parent_role=None, from_files=None, from_include=False, validate=True):
+
+ if from_files is None:
+ from_files = {}
+ try:
+ # The ROLE_CACHE is a dictionary keyed by role name; each entry is
+ # another dictionary that maps a frozenset of the parameters
+ # specified for the role to the Role() object itself. The frozenset
+ # makes the parameter set hashable and thus usable as a key.
+
+ params = role_include.get_role_params()
+ if role_include.when is not None:
+ params['when'] = role_include.when
+ if role_include.tags is not None:
+ params['tags'] = role_include.tags
+ if from_files is not None:
+ params['from_files'] = from_files
+ if role_include.vars:
+ params['vars'] = role_include.vars
+
+ params['from_include'] = from_include
+
+ hashed_params = hash_params(params)
+ if role_include.get_name() in play.ROLE_CACHE:
+ for (entry, role_obj) in play.ROLE_CACHE[role_include.get_name()].items():
+ if hashed_params == entry:
+ if parent_role:
+ role_obj.add_parent(parent_role)
+ return role_obj
+
+ # TODO: need to fix cycle detection in role load (maybe use an empty dict
+ # for the in-flight in role cache as a sentinel that we're already trying to load
+ # that role?)
+ # see https://github.com/ansible/ansible/issues/61527
+ r = Role(play=play, from_files=from_files, from_include=from_include, validate=validate)
+ r._load_role_data(role_include, parent_role=parent_role)
+
+ if role_include.get_name() not in play.ROLE_CACHE:
+ play.ROLE_CACHE[role_include.get_name()] = dict()
+
+ # FIXME: how to handle cache keys for collection-based roles, since they're technically adjustable per task?
+ play.ROLE_CACHE[role_include.get_name()][hashed_params] = r
+ return r
+
+ except RuntimeError:
+ raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles",
+ obj=role_include._ds)
+
+ def _load_role_data(self, role_include, parent_role=None):
+ self._role_name = role_include.role
+ self._role_path = role_include.get_role_path()
+ self._role_collection = role_include._role_collection
+ self._role_params = role_include.get_role_params()
+ self._variable_manager = role_include.get_variable_manager()
+ self._loader = role_include.get_loader()
+
+ if parent_role:
+ self.add_parent(parent_role)
+
+ # copy over all field attributes from the RoleInclude
+ # update self._attr directly, to avoid squashing
+ for attr_name in self.fattributes:
+ setattr(self, f'_{attr_name}', getattr(role_include, f'_{attr_name}', Sentinel))
+
+ # vars and default vars are regular dictionaries
+ self._role_vars = self._load_role_yaml('vars', main=self._from_files.get('vars'), allow_dir=True)
+ if self._role_vars is None:
+ self._role_vars = {}
+ elif not isinstance(self._role_vars, Mapping):
+ raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
+
+ self._default_vars = self._load_role_yaml('defaults', main=self._from_files.get('defaults'), allow_dir=True)
+ if self._default_vars is None:
+ self._default_vars = {}
+ elif not isinstance(self._default_vars, Mapping):
+ raise AnsibleParserError("The defaults/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
+
+ # load the role's other files, if they exist
+ metadata = self._load_role_yaml('meta')
+ if metadata:
+ self._metadata = RoleMetadata.load(metadata, owner=self, variable_manager=self._variable_manager, loader=self._loader)
+ self._dependencies = self._load_dependencies()
+ else:
+ self._metadata = RoleMetadata()
+
+ # reset collections list; roles do not inherit collections from parents, just use the defaults
+ # FUTURE: use a private config default for this so we can allow it to be overridden later
+ self.collections = []
+
+ # configure plugin/collection loading; either prepend the current role's collection or configure legacy plugin loading
+ # FIXME: need exception for explicit ansible.legacy?
+ if self._role_collection: # this is a collection-hosted role
+ self.collections.insert(0, self._role_collection)
+ else: # this is a legacy role, but set the default collection if there is one
+ default_collection = AnsibleCollectionConfig.default_collection
+ if default_collection:
+ self.collections.insert(0, default_collection)
+ # legacy role, ensure all plugin dirs under the role are added to plugin search path
+ add_all_plugin_dirs(self._role_path)
+
+ # collections can be specified in metadata for legacy or collection-hosted roles
+ if self._metadata.collections:
+ self.collections.extend((c for c in self._metadata.collections if c not in self.collections))
+
+ # if any collections were specified, ensure that core or legacy synthetic collections are always included
+ if self.collections:
+ # default append collection is core for collection-hosted roles, legacy for others
+ default_append_collection = 'ansible.builtin' if self._role_collection else 'ansible.legacy'
+ if 'ansible.builtin' not in self.collections and 'ansible.legacy' not in self.collections:
+ self.collections.append(default_append_collection)
+
+ task_data = self._load_role_yaml('tasks', main=self._from_files.get('tasks'))
+
+ if self._should_validate:
+ role_argspecs = self._get_role_argspecs()
+ task_data = self._prepend_validation_task(task_data, role_argspecs)
+
+ if task_data:
+ try:
+ self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager)
+ except AssertionError as e:
+ raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
+ obj=task_data, orig_exc=e)
+
+ handler_data = self._load_role_yaml('handlers', main=self._from_files.get('handlers'))
+ if handler_data:
+ try:
+ self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader,
+ variable_manager=self._variable_manager)
+ except AssertionError as e:
+ raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
+ obj=handler_data, orig_exc=e)
+
+ def _get_role_argspecs(self):
+ """Get the role argument spec data.
+
+ Role arg specs can be in one of two files in the role meta subdir: argument_specs.yml
+ or main.yml. The former has precedence over the latter. Data is not combined
+ between the files.
+
+ :returns: A dict of all data under the top-level ``argument_specs`` YAML key
+ in the argument spec file. An empty dict is returned if there is no
+ argspec data.
+ """
+ base_argspec_path = os.path.join(self._role_path, 'meta', 'argument_specs')
+
+ for ext in C.YAML_FILENAME_EXTENSIONS:
+ full_path = base_argspec_path + ext
+ if self._loader.path_exists(full_path):
+ # Note: _load_role_yaml() takes care of rebuilding the path.
+ argument_specs = self._load_role_yaml('meta', main='argument_specs')
+ try:
+ return argument_specs.get('argument_specs') or {}
+ except AttributeError:
+ return {}
+
+ # We did not find the meta/argument_specs.[yml|yaml] file, so use the spec
+ # dict from the role meta data, if it exists. Ansible 2.11 and later will
+ # have the 'argument_specs' attribute, but earlier versions will not.
+ return getattr(self._metadata, 'argument_specs', {})
+
+ def _prepend_validation_task(self, task_data, argspecs):
+ '''Insert a role validation task if we have a role argument spec.
+
+ This method will prepend a validation task to the front of the role task
+ list to perform argument spec validation before any other tasks, if an arg spec
+ exists for the entry point. Entry point defaults to `main`.
+
+ :param task_data: List of tasks loaded from the role.
+ :param argspecs: The role argument spec data dict.
+
+ :returns: The (possibly modified) task list.
+ '''
+ if argspecs:
+ # Determine the role entry point so we can retrieve the correct argument spec.
+ # This comes from the `tasks_from` value to include_role or import_role.
+ entrypoint = self._from_files.get('tasks', 'main')
+ entrypoint_arg_spec = argspecs.get(entrypoint)
+
+ if entrypoint_arg_spec:
+ validation_task = self._create_validation_task(entrypoint_arg_spec, entrypoint)
+
+ # Prepend our validate_argument_spec action to happen before any tasks provided by the role.
+ # 'any tasks' can and does include 0 or None tasks, in which case we create a new task list
+ # containing only our validate_argument_spec task
+ if not task_data:
+ task_data = []
+ task_data.insert(0, validation_task)
+ return task_data
+
+ def _create_validation_task(self, argument_spec, entrypoint_name):
+ '''Create a new task data structure that uses the validate_argument_spec action plugin.
+
+ :param argument_spec: The arg spec definition for a particular role entry point.
+ This will be the entire arg spec for the entry point as read from the input file.
+ :param entrypoint_name: The name of the role entry point associated with the
+ supplied `argument_spec`.
+ '''
+
+ # If the arg spec provides a short description, use it to flesh out the validation task name
+ task_name = "Validating arguments against arg spec '%s'" % entrypoint_name
+ if 'short_description' in argument_spec:
+ task_name = task_name + ' - ' + argument_spec['short_description']
+
+ return {
+ 'action': {
+ 'module': 'ansible.builtin.validate_argument_spec',
+ # Pass only the 'options' portion of the arg spec to the module.
+ 'argument_spec': argument_spec.get('options', {}),
+ 'provided_arguments': self._role_params,
+ 'validate_args_context': {
+ 'type': 'role',
+ 'name': self._role_name,
+ 'argument_spec_name': entrypoint_name,
+ 'path': self._role_path
+ },
+ },
+ 'name': task_name,
+ 'tags': ['always'],
+ }
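To make the mapping concrete, here is a sketch of the task dict the method above would generate for a hypothetical role named `webserver` whose `meta/argument_specs.yml` defines a `main` entry point; this is a hand-built illustration mirroring the return value, not a call into Ansible:

```python
# Hypothetical entry-point spec, as it would look after YAML parsing.
entrypoint_arg_spec = {
    'short_description': 'Configure a web server',
    'options': {
        'http_port': {'type': 'int', 'default': 80},
    },
}

# The prepended validation task, mirroring _create_validation_task above.
validation_task = {
    'action': {
        'module': 'ansible.builtin.validate_argument_spec',
        'argument_spec': entrypoint_arg_spec.get('options', {}),
        'provided_arguments': {'http_port': 8080},  # the role params the user passed
        'validate_args_context': {
            'type': 'role',
            'name': 'webserver',
            'argument_spec_name': 'main',
            'path': '/etc/ansible/roles/webserver',  # hypothetical path
        },
    },
    'name': "Validating arguments against arg spec 'main' - Configure a web server",
    'tags': ['always'],
}
```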
+
+ def _load_role_yaml(self, subdir, main=None, allow_dir=False):
+ '''
+ Find and load role YAML files and return data found.
+ :param subdir: subdir of role to search (vars, files, tasks, handlers, defaults)
+ :type subdir: string
+ :param main: filename to match, will default to 'main.<ext>' if not provided.
+ :type main: string
+ :param allow_dir: If true, we combine the results of all matching files found.
+ If false, only the first match is used ('highlander' rules). Only for vars (dicts), not tasks (lists).
+ :type allow_dir: bool
+
+ :returns: data from the matched file(s), type can be dict or list depending on vars or tasks.
+ '''
+ data = None
+ file_path = os.path.join(self._role_path, subdir)
+ if self._loader.path_exists(file_path) and self._loader.is_directory(file_path):
+ # Valid extensions and ordering for roles is hard-coded to maintain portability
+ extensions = ['.yml', '.yaml', '.json'] # same as default for YAML_FILENAME_EXTENSIONS
+
+ # look for extensionless files before/after the bare name, depending on whether 'main' was set
+ # keep the original 'main' value so errors can be reported if no files are found
+ if main is None:
+ _main = 'main'
+ extensions.append('')
+ else:
+ _main = main
+ extensions.insert(0, '')
+
+ # not really 'find_vars_files' but find_files_with_extensions_default_to_yaml_filename_extensions
+ found_files = self._loader.find_vars_files(file_path, _main, extensions, allow_dir)
+ if found_files:
+ for found in found_files:
+
+ if not is_subpath(found, file_path):
+ raise AnsibleParserError("Failed loading '%s' for role (%s) as it is not inside the expected role path: '%s'" %
+ (to_text(found), self._role_name, to_text(file_path)))
+
+ new_data = self._loader.load_from_file(found)
+ if new_data:
+ if data is not None and isinstance(new_data, Mapping):
+ data = combine_vars(data, new_data)
+ else:
+ data = new_data
+
+ # found data so no need to continue unless we want to merge
+ if not allow_dir:
+ break
+
+ elif main is not None:
+ # this won't trigger with the default 'main'; it only triggers when <subdir>_from is specified
+ raise AnsibleParserError("Could not find specified file in role: %s/%s" % (subdir, main))
+
+ return data
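The extension ordering above is subtle, so here is a standalone sketch (not Ansible's actual search code) of the candidate filenames it produces. When `main` is not provided, the bare name is tried after the known extensions; when a `<subdir>_from` file was requested, the bare name is tried first:

```python
# Standalone sketch of _load_role_yaml's candidate-file ordering.
def candidate_files(subdir, main=None):
    extensions = ['.yml', '.yaml', '.json']
    if main is None:
        name = 'main'
        extensions.append('')        # main.yml, main.yaml, main.json, main
    else:
        name = main
        extensions.insert(0, '')     # <main>, <main>.yml, <main>.yaml, <main>.json
    return ['%s/%s%s' % (subdir, name, ext) for ext in extensions]

print(candidate_files('tasks'))                # default entry point
print(candidate_files('tasks', main='setup'))  # e.g. tasks_from: setup
```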
+
+ def _load_dependencies(self):
+ '''
+ Recursively loads role dependencies from the metadata list of
+ dependencies, if it exists
+ '''
+
+ deps = []
+ if self._metadata:
+ for role_include in self._metadata.dependencies:
+ r = Role.load(role_include, play=self._play, parent_role=self)
+ deps.append(r)
+
+ return deps
+
+ # other functions
+
+ def add_parent(self, parent_role):
+        ''' adds a role to the list of this role's parents '''
+ if not isinstance(parent_role, Role):
+ raise AnsibleAssertionError()
+
+ if parent_role not in self._parents:
+ self._parents.append(parent_role)
+
+ def get_parents(self):
+ return self._parents
+
+ def get_default_vars(self, dep_chain=None):
+ dep_chain = [] if dep_chain is None else dep_chain
+
+ default_vars = dict()
+ for dep in self.get_all_dependencies():
+ default_vars = combine_vars(default_vars, dep.get_default_vars())
+ if dep_chain:
+ for parent in dep_chain:
+ default_vars = combine_vars(default_vars, parent._default_vars)
+ default_vars = combine_vars(default_vars, self._default_vars)
+ return default_vars
+
+ def get_inherited_vars(self, dep_chain=None):
+ dep_chain = [] if dep_chain is None else dep_chain
+
+ inherited_vars = dict()
+
+ if dep_chain:
+ for parent in dep_chain:
+ inherited_vars = combine_vars(inherited_vars, parent.vars)
+ inherited_vars = combine_vars(inherited_vars, parent._role_vars)
+ return inherited_vars
+
+ def get_role_params(self, dep_chain=None):
+ dep_chain = [] if dep_chain is None else dep_chain
+
+ params = {}
+ if dep_chain:
+ for parent in dep_chain:
+ params = combine_vars(params, parent._role_params)
+ params = combine_vars(params, self._role_params)
+ return params
+
+ def get_vars(self, dep_chain=None, include_params=True):
+ dep_chain = [] if dep_chain is None else dep_chain
+
+ all_vars = self.get_inherited_vars(dep_chain)
+
+ for dep in self.get_all_dependencies():
+ all_vars = combine_vars(all_vars, dep.get_vars(include_params=include_params))
+
+ all_vars = combine_vars(all_vars, self.vars)
+ all_vars = combine_vars(all_vars, self._role_vars)
+ if include_params:
+ all_vars = combine_vars(all_vars, self.get_role_params(dep_chain=dep_chain))
+
+ return all_vars
+
+ def get_direct_dependencies(self):
+ return self._dependencies[:]
+
+ def get_all_dependencies(self):
+ '''
+ Returns a list of all deps, built recursively from all child dependencies,
+ in the proper order in which they should be executed or evaluated.
+ '''
+
+ child_deps = []
+
+ for dep in self.get_direct_dependencies():
+ for child_dep in dep.get_all_dependencies():
+ child_deps.append(child_dep)
+ child_deps.append(dep)
+
+ return child_deps
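The recursion above yields a post-order traversal: every dependency is emitted before the role that depends on it. A toy illustration, independent of Ansible, with made-up role names:

```python
# Toy model of get_all_dependencies' ordering: children before parents.
class FakeRole:
    def __init__(self, name, deps=()):
        self.name = name
        self._deps = list(deps)

    def get_direct_dependencies(self):
        return self._deps[:]

    def get_all_dependencies(self):
        out = []
        for dep in self.get_direct_dependencies():
            out.extend(dep.get_all_dependencies())  # grandchildren first
            out.append(dep)
        return out

common = FakeRole('common')
db = FakeRole('db', deps=[common])
app = FakeRole('app', deps=[db])

print([r.name for r in app.get_all_dependencies()])  # ['common', 'db']
```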
+
+ def get_task_blocks(self):
+ return self._task_blocks[:]
+
+ def get_handler_blocks(self, play, dep_chain=None):
+ # Do not recreate this list each time ``get_handler_blocks`` is called.
+ # Cache the results so that we don't potentially overwrite with copied duplicates
+ #
+ # ``get_handler_blocks`` may be called when handling ``import_role`` during parsing
+ # as well as with ``Play.compile_roles_handlers`` from ``TaskExecutor``
+ if self._compiled_handler_blocks:
+ return self._compiled_handler_blocks
+
+ self._compiled_handler_blocks = block_list = []
+
+ # update the dependency chain here
+ if dep_chain is None:
+ dep_chain = []
+ new_dep_chain = dep_chain + [self]
+
+ for dep in self.get_direct_dependencies():
+ dep_blocks = dep.get_handler_blocks(play=play, dep_chain=new_dep_chain)
+ block_list.extend(dep_blocks)
+
+ for task_block in self._handler_blocks:
+ new_task_block = task_block.copy()
+ new_task_block._dep_chain = new_dep_chain
+ new_task_block._play = play
+ block_list.append(new_task_block)
+
+ return block_list
+
+ def has_run(self, host):
+ '''
+ Returns true if this role has been iterated over completely and
+ at least one task was run
+ '''
+
+ return host.name in self._completed and not self._metadata.allow_duplicates
+
+ def compile(self, play, dep_chain=None):
+ '''
+ Returns the task list for this role, which is created by first
+ recursively compiling the tasks for all direct dependencies, and
+ then adding on the tasks for this role.
+
+ The role compile() also remembers and saves the dependency chain
+ with each task, so tasks know by which route they were found, and
+ can correctly take their parent's tags/conditionals into account.
+ '''
+ from ansible.playbook.block import Block
+ from ansible.playbook.task import Task
+
+ block_list = []
+
+ # update the dependency chain here
+ if dep_chain is None:
+ dep_chain = []
+ new_dep_chain = dep_chain + [self]
+
+ deps = self.get_direct_dependencies()
+ for dep in deps:
+ dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
+ block_list.extend(dep_blocks)
+
+ for task_block in self._task_blocks:
+ new_task_block = task_block.copy()
+ new_task_block._dep_chain = new_dep_chain
+ new_task_block._play = play
+ block_list.append(new_task_block)
+
+ eor_block = Block(play=play)
+ eor_block._loader = self._loader
+ eor_block._role = self
+ eor_block._variable_manager = self._variable_manager
+ eor_block.run_once = False
+
+ eor_task = Task(block=eor_block)
+ eor_task._role = self
+ eor_task.action = 'meta'
+ eor_task.args = {'_raw_params': 'role_complete'}
+ eor_task.implicit = True
+ eor_task.tags = ['always']
+ eor_task.when = True
+
+ eor_block.block = [eor_task]
+ block_list.append(eor_block)
+
+ return block_list
+
+ def serialize(self, include_deps=True):
+ res = super(Role, self).serialize()
+
+ res['_role_name'] = self._role_name
+ res['_role_path'] = self._role_path
+ res['_role_vars'] = self._role_vars
+ res['_role_params'] = self._role_params
+ res['_default_vars'] = self._default_vars
+ res['_had_task_run'] = self._had_task_run.copy()
+ res['_completed'] = self._completed.copy()
+
+ if self._metadata:
+ res['_metadata'] = self._metadata.serialize()
+
+ if include_deps:
+ deps = []
+ for role in self.get_direct_dependencies():
+ deps.append(role.serialize())
+ res['_dependencies'] = deps
+
+ parents = []
+ for parent in self._parents:
+ parents.append(parent.serialize(include_deps=False))
+ res['_parents'] = parents
+
+ return res
+
+ def deserialize(self, data, include_deps=True):
+ self._role_name = data.get('_role_name', '')
+ self._role_path = data.get('_role_path', '')
+ self._role_vars = data.get('_role_vars', dict())
+ self._role_params = data.get('_role_params', dict())
+ self._default_vars = data.get('_default_vars', dict())
+ self._had_task_run = data.get('_had_task_run', dict())
+ self._completed = data.get('_completed', dict())
+
+ if include_deps:
+ deps = []
+ for dep in data.get('_dependencies', []):
+ r = Role()
+ r.deserialize(dep)
+ deps.append(r)
+ setattr(self, '_dependencies', deps)
+
+ parent_data = data.get('_parents', [])
+ parents = []
+ for parent in parent_data:
+ r = Role()
+ r.deserialize(parent, include_deps=False)
+ parents.append(r)
+ setattr(self, '_parents', parents)
+
+ metadata_data = data.get('_metadata')
+ if metadata_data:
+ m = RoleMetadata()
+ m.deserialize(metadata_data)
+ self._metadata = m
+
+ super(Role, self).deserialize(data)
+
+ def set_loader(self, loader):
+ self._loader = loader
+ for parent in self._parents:
+ parent.set_loader(loader)
+ for dep in self.get_direct_dependencies():
+ dep.set_loader(loader)
diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py
new file mode 100644
index 0000000..b27a231
--- /dev/null
+++ b/lib/ansible/playbook/role/definition.py
@@ -0,0 +1,240 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils.six import string_types
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.taggable import Taggable
+from ansible.template import Templar
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.collection_loader._collection_finder import _get_collection_role_path
+from ansible.utils.path import unfrackpath
+from ansible.utils.display import Display
+
+__all__ = ['RoleDefinition']
+
+display = Display()
+
+
+class RoleDefinition(Base, Conditional, Taggable, CollectionSearch):
+
+ role = FieldAttribute(isa='string')
+
+ def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None, collection_list=None):
+
+ super(RoleDefinition, self).__init__()
+
+ self._play = play
+ self._variable_manager = variable_manager
+ self._loader = loader
+
+ self._role_path = None
+ self._role_collection = None
+ self._role_basedir = role_basedir
+ self._role_params = dict()
+ self._collection_list = collection_list
+
+ # def __repr__(self):
+ # return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')
+
+ @staticmethod
+ def load(data, variable_manager=None, loader=None):
+ raise AnsibleError("not implemented")
+
+ def preprocess_data(self, ds):
+ # role names that are simply numbers can be parsed by PyYAML
+ # as integers even when quoted, so turn it into a string type
+ if isinstance(ds, int):
+ ds = "%s" % ds
+
+ if not isinstance(ds, dict) and not isinstance(ds, string_types) and not isinstance(ds, AnsibleBaseYAMLObject):
+ raise AnsibleAssertionError()
+
+ if isinstance(ds, dict):
+ ds = super(RoleDefinition, self).preprocess_data(ds)
+
+ # save the original ds for use later
+ self._ds = ds
+
+ # we create a new data structure here, using the same
+ # object used internally by the YAML parsing code so we
+ # can preserve file:line:column information if it exists
+ new_ds = AnsibleMapping()
+ if isinstance(ds, AnsibleBaseYAMLObject):
+ new_ds.ansible_pos = ds.ansible_pos
+
+ # first we pull the role name out of the data structure,
+ # and then use that to determine the role path (which may
+ # result in a new role name, if it was a file path)
+ role_name = self._load_role_name(ds)
+ (role_name, role_path) = self._load_role_path(role_name)
+
+ # next, we split the role params out from the valid role
+ # attributes and update the new datastructure with that
+ # result and the role name
+ if isinstance(ds, dict):
+ (new_role_def, role_params) = self._split_role_params(ds)
+ new_ds |= new_role_def
+ self._role_params = role_params
+
+ # set the role name in the new ds
+ new_ds['role'] = role_name
+
+ # we store the role path internally
+ self._role_path = role_path
+
+ # and return the cleaned-up data structure
+ return new_ds
+
+ def _load_role_name(self, ds):
+ '''
+ Returns the role name (either the role: or name: field) from
+ the role definition, or (when the role definition is a simple
+ string), just that string
+ '''
+
+ if isinstance(ds, string_types):
+ return ds
+
+ role_name = ds.get('role', ds.get('name'))
+ if not role_name or not isinstance(role_name, string_types):
+ raise AnsibleError('role definitions must contain a role name', obj=ds)
+
+ # if we have the required datastructures, and if the role_name
+ # contains a variable, try and template it now
+ if self._variable_manager:
+ all_vars = self._variable_manager.get_vars(play=self._play)
+ templar = Templar(loader=self._loader, variables=all_vars)
+ role_name = templar.template(role_name)
+
+ return role_name
+
+ def _load_role_path(self, role_name):
+ '''
+ the 'role', as specified in the ds (or as a bare string), can either
+ be a simple name or a full path. If it is a full path, we use the
+ basename as the role name, otherwise we take the name as-given and
+ append it to the default role path
+ '''
+
+ # create a templar class to template the dependency names, in
+ # case they contain variables
+ if self._variable_manager is not None:
+ all_vars = self._variable_manager.get_vars(play=self._play)
+ else:
+ all_vars = dict()
+
+ templar = Templar(loader=self._loader, variables=all_vars)
+ role_name = templar.template(role_name)
+
+ role_tuple = None
+
+ # try to load as a collection-based role first
+ if self._collection_list or AnsibleCollectionRef.is_valid_fqcr(role_name):
+ role_tuple = _get_collection_role_path(role_name, self._collection_list)
+
+ if role_tuple:
+ # we found it, stash collection data and return the name/path tuple
+ self._role_collection = role_tuple[2]
+ return role_tuple[0:2]
+
+ # We didn't find a collection role, look in defined role paths
+ # FUTURE: refactor this to be callable from internal so we can properly order
+ # ansible.legacy searches with the collections keyword
+
+ # we always start the search for roles in the base directory of the playbook
+ role_search_paths = [
+ os.path.join(self._loader.get_basedir(), u'roles'),
+ ]
+
+ # also search in the configured roles path
+ if C.DEFAULT_ROLES_PATH:
+ role_search_paths.extend(C.DEFAULT_ROLES_PATH)
+
+ # next, append the roles basedir, if it was set, so we can
+ # search relative to that directory for dependent roles
+ if self._role_basedir:
+ role_search_paths.append(self._role_basedir)
+
+ # finally as a last resort we look in the current basedir as set
+ # in the loader (which should be the playbook dir itself) but without
+ # the roles/ dir appended
+ role_search_paths.append(self._loader.get_basedir())
+
+ # now iterate through the possible paths and return the first one we find
+ for path in role_search_paths:
+ path = templar.template(path)
+ role_path = unfrackpath(os.path.join(path, role_name))
+ if self._loader.path_exists(role_path):
+ return (role_name, role_path)
+
+ # if not found elsewhere try to extract path from name
+ role_path = unfrackpath(role_name)
+ if self._loader.path_exists(role_path):
+ role_name = os.path.basename(role_name)
+ return (role_name, role_path)
+
+ searches = (self._collection_list or []) + role_search_paths
+ raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(searches)), obj=self._ds)
+
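A simplified sketch (not Ansible's code) of the search order `_load_role_path` walks for a non-collection role; the concrete paths below are hypothetical:

```python
# Simplified restatement of the role search-path ordering above.
import os

def role_search_paths(playbook_dir, configured_roles_path=(), role_basedir=None):
    paths = [os.path.join(playbook_dir, 'roles')]   # 1) <playbook>/roles
    paths.extend(configured_roles_path)             # 2) DEFAULT_ROLES_PATH entries
    if role_basedir:
        paths.append(role_basedir)                  # 3) basedir of the including role
    paths.append(playbook_dir)                      # 4) playbook dir itself, last resort
    return paths

print(role_search_paths('/work/site',
                        configured_roles_path=['/etc/ansible/roles'],
                        role_basedir='/work/site/roles/parent'))
```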
+ def _split_role_params(self, ds):
+ '''
+ Splits any random role params off from the role spec and stores
+ them in a dictionary of params for parsing later
+ '''
+
+ role_def = dict()
+ role_params = dict()
+ base_attribute_names = frozenset(self.fattributes)
+ for (key, value) in ds.items():
+ # use the list of FieldAttribute values to determine what is and is not
+ # an extra parameter for this role (or sub-class of this role)
+ # FIXME: hard-coded list of exception key names here corresponds to the
+ # connection fields in the Base class. There may need to be some
+ # other mechanism where we exclude certain kinds of field attributes,
+ # or make this list more automatic in some way so we don't have to
+ # remember to update it manually.
+ if key not in base_attribute_names:
+ # this key does not match a field attribute, so it must be a role param
+ role_params[key] = value
+ else:
+ # this is a field attribute, so copy it over directly
+ role_def[key] = value
+
+ return (role_def, role_params)
+
+ def get_role_params(self):
+ return self._role_params.copy()
+
+ def get_role_path(self):
+ return self._role_path
+
+ def get_name(self, include_role_fqcn=True):
+ if include_role_fqcn:
+ return '.'.join(x for x in (self._role_collection, self.role) if x)
+ return self.role
diff --git a/lib/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py
new file mode 100644
index 0000000..e0d4b67
--- /dev/null
+++ b/lib/ansible/playbook/role/include.py
@@ -0,0 +1,57 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.six import string_types
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.role.definition import RoleDefinition
+from ansible.module_utils._text import to_native
+
+
+__all__ = ['RoleInclude']
+
+
+class RoleInclude(RoleDefinition):
+
+ """
+ A derivative of RoleDefinition, used by playbook code when a role
+ is included for execution in a play.
+ """
+
+ delegate_to = FieldAttribute(isa='string')
+ delegate_facts = FieldAttribute(isa='bool', default=False)
+
+ def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None, collection_list=None):
+ super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager,
+ loader=loader, collection_list=collection_list)
+
+ @staticmethod
+ def load(data, play, current_role_path=None, parent_role=None, variable_manager=None, loader=None, collection_list=None):
+
+ if not (isinstance(data, string_types) or isinstance(data, dict) or isinstance(data, AnsibleBaseYAMLObject)):
+ raise AnsibleParserError("Invalid role definition: %s" % to_native(data))
+
+ if isinstance(data, string_types) and ',' in data:
+ raise AnsibleError("Invalid old style role requirement: %s" % data)
+
+ ri = RoleInclude(play=play, role_basedir=current_role_path, variable_manager=variable_manager, loader=loader, collection_list=collection_list)
+ return ri.load_data(data, variable_manager=variable_manager, loader=loader)
diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py
new file mode 100644
index 0000000..275ee54
--- /dev/null
+++ b/lib/ansible/playbook/role/metadata.py
@@ -0,0 +1,130 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleParserError, AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import string_types
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.helpers import load_list_of_roles
+from ansible.playbook.role.requirement import RoleRequirement
+
+__all__ = ['RoleMetadata']
+
+
+class RoleMetadata(Base, CollectionSearch):
+ '''
+ This class wraps the parsing and validation of the optional metadata
+ within each Role (meta/main.yml).
+ '''
+
+ allow_duplicates = FieldAttribute(isa='bool', default=False)
+ dependencies = FieldAttribute(isa='list', default=list)
+ galaxy_info = FieldAttribute(isa='GalaxyInfo')
+ argument_specs = FieldAttribute(isa='dict', default=dict)
+
+ def __init__(self, owner=None):
+ self._owner = owner
+ super(RoleMetadata, self).__init__()
+
+ @staticmethod
+ def load(data, owner, variable_manager=None, loader=None):
+ '''
+ Returns a new RoleMetadata object based on the datastructure passed in.
+ '''
+
+ if not isinstance(data, dict):
+ raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name())
+
+ m = RoleMetadata(owner=owner).load_data(data, variable_manager=variable_manager, loader=loader)
+ return m
+
+ def _load_dependencies(self, attr, ds):
+ '''
+ This is a helper loading function for the dependencies list,
+ which returns a list of RoleInclude objects
+ '''
+
+ roles = []
+ if ds:
+ if not isinstance(ds, list):
+ raise AnsibleParserError("Expected role dependencies to be a list.", obj=self._ds)
+
+ for role_def in ds:
+ # FIXME: consolidate with ansible-galaxy to keep this in sync
+ if isinstance(role_def, string_types) or 'role' in role_def or 'name' in role_def:
+ roles.append(role_def)
+ continue
+ try:
+ # role_def is new style: { src: 'galaxy.role,version,name', other_vars: "here" }
+ def_parsed = RoleRequirement.role_yaml_parse(role_def)
+ if def_parsed.get('name'):
+ role_def['name'] = def_parsed['name']
+ roles.append(role_def)
+ except AnsibleError as exc:
+ raise AnsibleParserError(to_native(exc), obj=role_def, orig_exc=exc)
+
+ current_role_path = None
+ collection_search_list = None
+
+ if self._owner:
+ current_role_path = os.path.dirname(self._owner._role_path)
+
+ # if the calling role has a collections search path defined, consult it
+ collection_search_list = self._owner.collections[:] or []
+
+ # if the calling role is a collection role, ensure that its containing collection is searched first
+ owner_collection = self._owner._role_collection
+ if owner_collection:
+ collection_search_list = [c for c in collection_search_list if c != owner_collection]
+ collection_search_list.insert(0, owner_collection)
+ # ensure fallback role search works
+ if 'ansible.legacy' not in collection_search_list:
+ collection_search_list.append('ansible.legacy')
+
+ try:
+ return load_list_of_roles(roles, play=self._owner._play, current_role_path=current_role_path,
+ variable_manager=self._variable_manager, loader=self._loader,
+ collection_search_list=collection_search_list)
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed list of role dependencies was encountered.", obj=self._ds, orig_exc=e)
+
+ def _load_galaxy_info(self, attr, ds):
+ '''
+ This is a helper loading function for the galaxy info entry
+ in the metadata, which returns a GalaxyInfo object rather than
+ a simple dictionary.
+ '''
+
+ return ds
+
+ def serialize(self):
+ return dict(
+ allow_duplicates=self._allow_duplicates,
+ dependencies=self._dependencies
+ )
+
+ def deserialize(self, data):
+ setattr(self, 'allow_duplicates', data.get('allow_duplicates', False))
+ setattr(self, 'dependencies', data.get('dependencies', []))
diff --git a/lib/ansible/playbook/role/requirement.py b/lib/ansible/playbook/role/requirement.py
new file mode 100644
index 0000000..59e9cf3
--- /dev/null
+++ b/lib/ansible/playbook/role/requirement.py
@@ -0,0 +1,128 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.playbook.role.definition import RoleDefinition
+from ansible.utils.display import Display
+from ansible.utils.galaxy import scm_archive_resource
+
+__all__ = ['RoleRequirement']
+
+VALID_SPEC_KEYS = [
+ 'name',
+ 'role',
+ 'scm',
+ 'src',
+ 'version',
+]
+
+display = Display()
+
+
+class RoleRequirement(RoleDefinition):
+
+ """
+ Helper class for Galaxy, which is used to parse both dependencies
+ specified in meta/main.yml and requirements.yml files.
+ """
+
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def repo_url_to_role_name(repo_url):
+ # gets the role name out of a repo like
+ # "http://git.example.com/repos/repo.git" => "repo"
+
+ if '://' not in repo_url and '@' not in repo_url:
+ return repo_url
+ trailing_path = repo_url.split('/')[-1]
+ if trailing_path.endswith('.git'):
+ trailing_path = trailing_path[:-4]
+ if trailing_path.endswith('.tar.gz'):
+ trailing_path = trailing_path[:-7]
+ if ',' in trailing_path:
+ trailing_path = trailing_path.split(',')[0]
+ return trailing_path
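Hedged examples of what the method yields, assuming ansible-core is importable; the URLs are invented:

```python
# repo_url_to_role_name examples (hypothetical URLs).
from ansible.playbook.role.requirement import RoleRequirement

print(RoleRequirement.repo_url_to_role_name('http://git.example.com/repos/repo.git'))  # repo
print(RoleRequirement.repo_url_to_role_name('git@example.com:acme/webrole.git'))       # webrole
print(RoleRequirement.repo_url_to_role_name('plain_role_name'))                        # plain_role_name
```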
+
+ @staticmethod
+ def role_yaml_parse(role):
+
+ if isinstance(role, string_types):
+ name = None
+ scm = None
+ src = None
+ version = None
+ if ',' in role:
+ if role.count(',') == 1:
+ (src, version) = role.strip().split(',', 1)
+ elif role.count(',') == 2:
+ (src, version, name) = role.strip().split(',', 2)
+ else:
+ raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
+ else:
+ src = role
+
+ if name is None:
+ name = RoleRequirement.repo_url_to_role_name(src)
+ if '+' in src:
+ (scm, src) = src.split('+', 1)
+
+ return dict(name=name, src=src, scm=scm, version=version)
+
+ if 'role' in role:
+ name = role['role']
+ if ',' in name:
+ raise AnsibleError("Invalid old style role requirement: %s" % name)
+ else:
+ del role['role']
+ role['name'] = name
+ else:
+ role = role.copy()
+
+ if 'src' in role:
+ # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
+ if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
+ role["src"] = "git+" + role["src"]
+
+ if '+' in role["src"]:
+ role["scm"], dummy, role["src"] = role["src"].partition('+')
+
+ if 'name' not in role:
+ role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])
+
+ if 'version' not in role:
+ role['version'] = ''
+
+ if 'scm' not in role:
+ role['scm'] = None
+
+ for key in list(role.keys()):
+ if key not in VALID_SPEC_KEYS:
+ role.pop(key)
+
+ return role
+
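A worked example of the old-style comma string form, assuming ansible-core is importable; the repository URL is hypothetical. The third comma field becomes the name, and the `git+` prefix is split off into `scm`:

```python
# role_yaml_parse on an old-style 'src,version,name' string (hypothetical URL).
from ansible.playbook.role.requirement import RoleRequirement

spec = RoleRequirement.role_yaml_parse('git+https://github.com/acme/webrole.git,v1.2,custom_name')
print(spec)
# {'name': 'custom_name', 'src': 'https://github.com/acme/webrole.git',
#  'scm': 'git', 'version': 'v1.2'}
```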
+ @staticmethod
+ def scm_archive_role(src, scm='git', name=None, version='HEAD', keep_scm_meta=False):
+
+ return scm_archive_resource(src, scm=scm, name=name, version=version, keep_scm_meta=keep_scm_meta)
diff --git a/lib/ansible/playbook/role_include.py b/lib/ansible/playbook/role_include.py
new file mode 100644
index 0000000..3946037
--- /dev/null
+++ b/lib/ansible/playbook/role_include.py
@@ -0,0 +1,185 @@
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from os.path import basename
+
+import ansible.constants as C
+from ansible.errors import AnsibleParserError
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.block import Block
+from ansible.playbook.task_include import TaskInclude
+from ansible.playbook.role import Role
+from ansible.playbook.role.include import RoleInclude
+from ansible.utils.display import Display
+from ansible.module_utils.six import string_types
+from ansible.template import Templar
+
+__all__ = ['IncludeRole']
+
+display = Display()
+
+
+class IncludeRole(TaskInclude):
+
+ """
+ A Role include is derived from a regular role to handle the special
+    circumstances related to the `- include_role: ...` task.
+ """
+
+ BASE = frozenset(('name', 'role')) # directly assigned
+ FROM_ARGS = frozenset(('tasks_from', 'vars_from', 'defaults_from', 'handlers_from')) # used to populate from dict in role
+ OTHER_ARGS = frozenset(('apply', 'public', 'allow_duplicates', 'rolespec_validate')) # assigned to matching property
+ VALID_ARGS = BASE | FROM_ARGS | OTHER_ARGS # all valid args
+
+ # =================================================================================
+ # ATTRIBUTES
+
+ # private, as these are 'module options' rather than task properties
+ allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)
+ public = FieldAttribute(isa='bool', default=False, private=True)
+ rolespec_validate = FieldAttribute(isa='bool', default=True)
+
+ def __init__(self, block=None, role=None, task_include=None):
+
+ super(IncludeRole, self).__init__(block=block, role=role, task_include=task_include)
+
+ self._from_files = {}
+ self._parent_role = role
+ self._role_name = None
+ self._role_path = None
+
+ def get_name(self):
+ ''' return the name of the task '''
+ return self.name or "%s : %s" % (self.action, self._role_name)
+
+ def get_block_list(self, play=None, variable_manager=None, loader=None):
+
+ # only need play passed in when dynamic
+ if play is None:
+ myplay = self._parent._play
+ else:
+ myplay = play
+
+ ri = RoleInclude.load(self._role_name, play=myplay, variable_manager=variable_manager, loader=loader, collection_list=self.collections)
+ ri.vars |= self.vars
+
+ if variable_manager is not None:
+ available_variables = variable_manager.get_vars(play=myplay, task=self)
+ else:
+ available_variables = {}
+ templar = Templar(loader=loader, variables=available_variables)
+ from_files = templar.template(self._from_files)
+
+ # build role
+ actual_role = Role.load(ri, myplay, parent_role=self._parent_role, from_files=from_files,
+ from_include=True, validate=self.rolespec_validate)
+ actual_role._metadata.allow_duplicates = self.allow_duplicates
+
+ if self.statically_loaded or self.public:
+ myplay.roles.append(actual_role)
+
+ # save this for later use
+ self._role_path = actual_role._role_path
+
+ # compile role with parent roles as dependencies to ensure they inherit
+ # variables
+ if not self._parent_role:
+ dep_chain = []
+ else:
+ dep_chain = list(self._parent_role._parents)
+ dep_chain.append(self._parent_role)
+
+ p_block = self.build_parent_block()
+
+ # collections value is not inherited; override with the value we calculated during role setup
+ p_block.collections = actual_role.collections
+
+ blocks = actual_role.compile(play=myplay, dep_chain=dep_chain)
+ for b in blocks:
+ b._parent = p_block
+ # HACK: parent inheritance doesn't seem to have a way to handle this intermediate override until squashed/finalized
+ b.collections = actual_role.collections
+
+ # update the available handlers in the play
+ handlers = actual_role.get_handler_blocks(play=myplay)
+ for h in handlers:
+ h._parent = p_block
+ myplay.handlers = myplay.handlers + handlers
+ return blocks, handlers
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+
+ ir = IncludeRole(block, role, task_include=task_include).load_data(data, variable_manager=variable_manager, loader=loader)
+
+ # Validate options
+ my_arg_names = frozenset(ir.args.keys())
+
+ # name is needed, or use role as alias
+ ir._role_name = ir.args.get('name', ir.args.get('role'))
+ if ir._role_name is None:
+ raise AnsibleParserError("'name' is a required field for %s." % ir.action, obj=data)
+
+ if 'public' in ir.args and ir.action not in C._ACTION_INCLUDE_ROLE:
+ raise AnsibleParserError('Invalid options for %s: public' % ir.action, obj=data)
+
+ # reject invalid args, which would otherwise be silently ignored
+ bad_opts = my_arg_names.difference(IncludeRole.VALID_ARGS)
+ if bad_opts:
+ raise AnsibleParserError('Invalid options for %s: %s' % (ir.action, ','.join(list(bad_opts))), obj=data)
+
+ # build options for role includes
+ for key in my_arg_names.intersection(IncludeRole.FROM_ARGS):
+ from_key = key.removesuffix('_from')
+ args_value = ir.args.get(key)
+ if not isinstance(args_value, string_types):
+ raise AnsibleParserError('Expected a string for %s but got %s instead' % (key, type(args_value)))
+ ir._from_files[from_key] = basename(args_value)
+
+ apply_attrs = ir.args.get('apply', {})
+ if apply_attrs and ir.action not in C._ACTION_INCLUDE_ROLE:
+ raise AnsibleParserError('Invalid options for %s: apply' % ir.action, obj=data)
+ elif not isinstance(apply_attrs, dict):
+ raise AnsibleParserError('Expected a dict for apply but got %s instead' % type(apply_attrs), obj=data)
+
+ # use a manual list, as otherwise the options would set other task parameters we don't want.
+ for option in my_arg_names.intersection(IncludeRole.OTHER_ARGS):
+ setattr(ir, option, ir.args.get(option))
+
+ return ir
+
+ def copy(self, exclude_parent=False, exclude_tasks=False):
+
+ new_me = super(IncludeRole, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
+ new_me.statically_loaded = self.statically_loaded
+ new_me._from_files = self._from_files.copy()
+ new_me._parent_role = self._parent_role
+ new_me._role_name = self._role_name
+ new_me._role_path = self._role_path
+
+ return new_me
+
+ def get_include_params(self):
+ v = super(IncludeRole, self).get_include_params()
+ if self._parent_role:
+ v |= self._parent_role.get_role_params()
+ v.setdefault('ansible_parent_role_names', []).insert(0, self._parent_role.get_name())
+ v.setdefault('ansible_parent_role_paths', []).insert(0, self._parent_role._role_path)
+ return v
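A sketch of how `IncludeRole.load` turns `*_from` options into the `_from_files` mapping; this mirrors the loop in `load()` above rather than calling Ansible, and the task data is hypothetical:

```python
# Sketch of the *_from -> _from_files mapping (hypothetical task args).
from os.path import basename

FROM_ARGS = frozenset(('tasks_from', 'vars_from', 'defaults_from', 'handlers_from'))

args = {'name': 'webserver', 'tasks_from': 'setup.yml', 'vars_from': 'Debian.yml'}

from_files = {}
for key in set(args) & FROM_ARGS:
    # strip the suffix so 'tasks_from' keys the 'tasks' subdir lookup
    from_files[key.removesuffix('_from')] = basename(args[key])

print(from_files)  # {'tasks': 'setup.yml', 'vars': 'Debian.yml'} (order may vary)
```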
diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py
new file mode 100644
index 0000000..4038d7f
--- /dev/null
+++ b/lib/ansible/playbook/taggable.py
@@ -0,0 +1,89 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.playbook.attribute import FieldAttribute
+from ansible.template import Templar
+
+
+class Taggable:
+
+ untagged = frozenset(['untagged'])
+ tags = FieldAttribute(isa='list', default=list, listof=(string_types, int), extend=True)
+
+ def _load_tags(self, attr, ds):
+ if isinstance(ds, list):
+ return ds
+ elif isinstance(ds, string_types):
+            return [x.strip() for x in ds.split(',')]
+ else:
+ raise AnsibleError('tags must be specified as a list', obj=ds)
+
+ def evaluate_tags(self, only_tags, skip_tags, all_vars):
+ ''' this checks if the current item should be executed depending on tag options '''
+
+ if self.tags:
+ templar = Templar(loader=self._loader, variables=all_vars)
+ tags = templar.template(self.tags)
+
+ _temp_tags = set()
+ for tag in tags:
+ if isinstance(tag, list):
+ _temp_tags.update(tag)
+ else:
+ _temp_tags.add(tag)
+ tags = _temp_tags
+ self.tags = list(tags)
+ else:
+ # this makes isdisjoint work for untagged
+ tags = self.untagged
+
+ should_run = True # default, tasks to run
+
+ if only_tags:
+ if 'always' in tags:
+ should_run = True
+ elif ('all' in only_tags and 'never' not in tags):
+ should_run = True
+ elif not tags.isdisjoint(only_tags):
+ should_run = True
+ elif 'tagged' in only_tags and tags != self.untagged and 'never' not in tags:
+ should_run = True
+ else:
+ should_run = False
+
+ if should_run and skip_tags:
+
+ # Check for tags that we need to skip
+ if 'all' in skip_tags:
+ if 'always' not in tags or 'always' in skip_tags:
+ should_run = False
+ elif not tags.isdisjoint(skip_tags):
+ should_run = False
+ elif 'tagged' in skip_tags and tags != self.untagged:
+ should_run = False
+
+ return should_run
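The decision logic above is easier to see as a condensed, standalone restatement (plain sets, no templating); the tag names in the calls are examples only:

```python
# Condensed restatement of the evaluate_tags decision table.
def should_run(task_tags, only_tags=frozenset(), skip_tags=frozenset()):
    tags = set(task_tags) or {'untagged'}
    run = True
    if only_tags:
        run = ('always' in tags
               or ('all' in only_tags and 'never' not in tags)
               or not tags.isdisjoint(only_tags)
               or ('tagged' in only_tags and tags != {'untagged'} and 'never' not in tags))
    if run and skip_tags:
        if 'all' in skip_tags:
            run = 'always' in tags and 'always' not in skip_tags
        elif not tags.isdisjoint(skip_tags):
            run = False
        elif 'tagged' in skip_tags and tags != {'untagged'}:
            run = False
    return run

print(should_run(['deploy'], only_tags={'deploy'}))   # True
print(should_run([], only_tags={'deploy'}))           # False (untagged)
print(should_run(['always'], skip_tags={'all'}))      # True ('always' survives --skip-tags all)
```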
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
new file mode 100644
index 0000000..6a9136d
--- /dev/null
+++ b/lib/ansible/playbook/task.py
@@ -0,0 +1,511 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import string_types
+from ansible.parsing.mod_args import ModuleArgsParser
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
+from ansible.plugins.loader import lookup_loader
+from ansible.playbook.attribute import FieldAttribute, NonInheritableFieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.block import Block
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.loop_control import LoopControl
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.display import Display
+from ansible.utils.sentinel import Sentinel
+
+__all__ = ['Task']
+
+display = Display()
+
+
+class Task(Base, Conditional, Taggable, CollectionSearch):
+
+ """
+ A task is a language feature that represents a call to a module, with given arguments and other parameters.
+ A handler is a subclass of a task.
+
+ Usage:
+
+ Task.load(datastructure) -> Task
+ Task.something(...)
+ """
+
+ # =================================================================================
+ # ATTRIBUTES
+ # load_<attribute_name> and
+ # validate_<attribute_name>
+ # will be used if defined
+ # might be possible to define others
+
+ # NOTE: ONLY set defaults on task attributes that are not inheritable,
+ # inheritance is only triggered if the 'current value' is Sentinel,
+    # default can be set at the play/top-level object and inheritance will take its course.
+
+ args = FieldAttribute(isa='dict', default=dict)
+ action = FieldAttribute(isa='string')
+
+ async_val = FieldAttribute(isa='int', default=0, alias='async')
+ changed_when = FieldAttribute(isa='list', default=list)
+ delay = FieldAttribute(isa='int', default=5)
+ delegate_to = FieldAttribute(isa='string')
+ delegate_facts = FieldAttribute(isa='bool')
+ failed_when = FieldAttribute(isa='list', default=list)
+ loop = FieldAttribute()
+ loop_control = NonInheritableFieldAttribute(isa='class', class_type=LoopControl, default=LoopControl)
+ notify = FieldAttribute(isa='list')
+ poll = FieldAttribute(isa='int', default=C.DEFAULT_POLL_INTERVAL)
+ register = FieldAttribute(isa='string', static=True)
+ retries = FieldAttribute(isa='int', default=3)
+ until = FieldAttribute(isa='list', default=list)
+
+ # deprecated, used to be loop and loop_args but loop has been repurposed
+ loop_with = NonInheritableFieldAttribute(isa='string', private=True)
+
+ def __init__(self, block=None, role=None, task_include=None):
+        ''' constructs a task; without the Task.load classmethod, it will be pretty blank '''
+
+ self._role = role
+ self._parent = None
+ self.implicit = False
+ self.resolved_action = None
+
+ if task_include:
+ self._parent = task_include
+ else:
+ self._parent = block
+
+ super(Task, self).__init__()
+
+ def get_name(self, include_role_fqcn=True):
+ ''' return the name of the task '''
+
+ if self._role:
+ role_name = self._role.get_name(include_role_fqcn=include_role_fqcn)
+
+ if self._role and self.name:
+ return "%s : %s" % (role_name, self.name)
+ elif self.name:
+ return self.name
+ else:
+ if self._role:
+ return "%s : %s" % (role_name, self.action)
+ else:
+ return "%s" % (self.action,)
+
+ def _merge_kv(self, ds):
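+        # flattens a mapping into the legacy inline "k=v" string form, e.g.
+        # {'state': 'present', 'name': 'httpd'} -> 'state=present name=httpd'
+        # (keys starting with '_' are skipped below)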
+ if ds is None:
+ return ""
+ elif isinstance(ds, string_types):
+ return ds
+ elif isinstance(ds, dict):
+ buf = ""
+ for (k, v) in ds.items():
+ if k.startswith('_'):
+ continue
+ buf = buf + "%s=%s " % (k, v)
+ buf = buf.strip()
+ return buf
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ t = Task(block=block, role=role, task_include=task_include)
+ return t.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def __repr__(self):
+ ''' returns a human readable representation of the task '''
+ if self.get_name() in C._ACTION_META:
+ return "TASK: meta (%s)" % self.args['_raw_params']
+ else:
+ return "TASK: %s" % self.get_name()
+
+ def _preprocess_with_loop(self, ds, new_ds, k, v):
+ ''' take a lookup plugin name and store it correctly '''
+
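+        # e.g. "with_items: [a, b]" is stored as loop_with='items' and loop=[a, b]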
+ loop_name = k.removeprefix("with_")
+ if new_ds.get('loop') is not None or new_ds.get('loop_with') is not None:
+ raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds)
+ if v is None:
+ raise AnsibleError("you must specify a value when using %s" % k, obj=ds)
+ new_ds['loop_with'] = loop_name
+ new_ds['loop'] = v
+ # display.deprecated("with_ type loops are being phased out, use the 'loop' keyword instead",
+ # version="2.10", collection_name='ansible.builtin')
+
+ def preprocess_data(self, ds):
+ '''
+        tasks take especially complex arguments, so they need pre-processing.
+ keep it short.
+ '''
+
+ if not isinstance(ds, dict):
+ raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
+
+ # the new, cleaned datastructure, which will have legacy
+ # items reduced to a standard structure suitable for the
+ # attributes of the task class
+ new_ds = AnsibleMapping()
+ if isinstance(ds, AnsibleBaseYAMLObject):
+ new_ds.ansible_pos = ds.ansible_pos
+
+ # since this affects the task action parsing, we have to resolve in preprocess instead of in typical validator
+ default_collection = AnsibleCollectionConfig.default_collection
+
+ collections_list = ds.get('collections')
+ if collections_list is None:
+ # use the parent value if our ds doesn't define it
+ collections_list = self.collections
+ else:
+ # Validate this untemplated field early on to guarantee we are dealing with a list.
+ # This is also done in CollectionSearch._load_collections() but this runs before that call.
+ collections_list = self.get_validated_value('collections', self.fattributes.get('collections'), collections_list, None)
+
+ if default_collection and not self._role: # FIXME: and not a collections role
+ if collections_list:
+ if default_collection not in collections_list:
+ collections_list.insert(0, default_collection)
+ else:
+ collections_list = [default_collection]
+
+ if collections_list and 'ansible.builtin' not in collections_list and 'ansible.legacy' not in collections_list:
+ collections_list.append('ansible.legacy')
+
+ if collections_list:
+ ds['collections'] = collections_list
+
+ # use the args parsing class to determine the action, args,
+ # and the delegate_to value from the various possible forms
+ # supported as legacy
+ args_parser = ModuleArgsParser(task_ds=ds, collection_list=collections_list)
+ try:
+ (action, args, delegate_to) = args_parser.parse()
+ except AnsibleParserError as e:
+            # if the raised exception was created with obj=ds args, it already includes the detail,
+            # so we don't need to add it and can simply re-raise.
+ if e.obj:
+ raise
+ # But if it wasn't, we can add the yaml object now to get more detail
+ raise AnsibleParserError(to_native(e), obj=ds, orig_exc=e)
+ else:
+ self.resolved_action = args_parser.resolved_action
+
+ # the command/shell/script modules used to support the `cmd` arg,
+ # which corresponds to what we now call _raw_params, so move that
+ # value over to _raw_params (assuming it is empty)
+ if action in C._ACTION_HAS_CMD:
+ if 'cmd' in args:
+ if args.get('_raw_params', '') != '':
+ raise AnsibleError("The 'cmd' argument cannot be used when other raw parameters are specified."
+ " Please put everything in one or the other place.", obj=ds)
+ args['_raw_params'] = args.pop('cmd')
+
+ new_ds['action'] = action
+ new_ds['args'] = args
+ new_ds['delegate_to'] = delegate_to
+
+ # we handle any 'vars' specified in the ds here, as we may
+ # be adding things to them below (special handling for includes).
+ # When that deprecated feature is removed, this can be too.
+ if 'vars' in ds:
+ # _load_vars is defined in Base, and is used to load a dictionary
+ # or list of dictionaries in a standard way
+ new_ds['vars'] = self._load_vars(None, ds.get('vars'))
+ else:
+ new_ds['vars'] = dict()
+
+ for (k, v) in ds.items():
+ if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
+ # we don't want to re-assign these values, which were determined by the ModuleArgsParser() above
+ continue
+ elif k.startswith('with_') and k.removeprefix("with_") in lookup_loader:
+ # transform into loop property
+ self._preprocess_with_loop(ds, new_ds, k, v)
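+            # when invalid task attributes are configured to hard-fail, unknown
+            # keys are passed through so _validate_attributes() can raise later;
+            # otherwise only known attributes are kept and the rest just warn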
+ elif C.INVALID_TASK_ATTRIBUTE_FAILED or k in self.fattributes:
+ new_ds[k] = v
+ else:
+ display.warning("Ignoring invalid attribute: %s" % k)
+
+ return super(Task, self).preprocess_data(new_ds)
+
+ def _load_loop_control(self, attr, ds):
+ if not isinstance(ds, dict):
+ raise AnsibleParserError(
+ "the `loop_control` value must be specified as a dictionary and cannot "
+ "be a variable itself (though it can contain variables)",
+ obj=ds,
+ )
+
+ return LoopControl.load(data=ds, variable_manager=self._variable_manager, loader=self._loader)
+
+ def _validate_attributes(self, ds):
+ try:
+ super(Task, self)._validate_attributes(ds)
+ except AnsibleParserError as e:
+ e.message += '\nThis error can be suppressed as a warning using the "invalid_task_attribute_failed" configuration'
+ raise e
+
+ def _validate_changed_when(self, attr, name, value):
+ if not isinstance(value, list):
+ setattr(self, name, [value])
+
+ def _validate_failed_when(self, attr, name, value):
+ if not isinstance(value, list):
+ setattr(self, name, [value])
+
+ def post_validate(self, templar):
+ '''
+ Override of base class post_validate, to also do final validation on
+ the block and task include (if any) to which this task belongs.
+ '''
+
+ if self._parent:
+ self._parent.post_validate(templar)
+
+ if AnsibleCollectionConfig.default_collection:
+ pass
+
+ super(Task, self).post_validate(templar)
+
+ def _post_validate_loop(self, attr, value, templar):
+ '''
+ Override post validation for the loop field, which is templated
+ specially in the TaskExecutor class when evaluating loops.
+ '''
+ return value
+
+ def _post_validate_environment(self, attr, value, templar):
+ '''
+        Override post validation of 'environment', as we don't want to
+        template it too early.
+ '''
+ env = {}
+ if value is not None:
+
+ def _parse_env_kv(k, v):
+ try:
+ env[k] = templar.template(v, convert_bare=False)
+ except AnsibleUndefinedVariable as e:
+ error = to_native(e)
+                    if self.action in C._ACTION_FACT_GATHERING and ('ansible_facts.env' in error or 'ansible_env' in error):
+ # ignore as fact gathering is required for 'env' facts
+ return
+ raise
+
+ if isinstance(value, list):
+ for env_item in value:
+ if isinstance(env_item, dict):
+ for k in env_item:
+ _parse_env_kv(k, env_item[k])
+ else:
+ isdict = templar.template(env_item, convert_bare=False)
+ if isinstance(isdict, dict):
+ env |= isdict
+ else:
+ display.warning("could not parse environment value, skipping: %s" % value)
+
+ elif isinstance(value, dict):
+ # should not really happen
+ env = dict()
+ for env_item in value:
+ _parse_env_kv(env_item, value[env_item])
+ else:
+ # at this point it should be a simple string, also should not happen
+ env = templar.template(value, convert_bare=False)
+
+ return env
+
+ def _post_validate_changed_when(self, attr, value, templar):
+ '''
+ changed_when is evaluated after the execution of the task is complete,
+ and should not be templated during the regular post_validate step.
+ '''
+ return value
+
+ def _post_validate_failed_when(self, attr, value, templar):
+ '''
+ failed_when is evaluated after the execution of the task is complete,
+ and should not be templated during the regular post_validate step.
+ '''
+ return value
+
+ def _post_validate_until(self, attr, value, templar):
+ '''
+ until is evaluated after the execution of the task is complete,
+ and should not be templated during the regular post_validate step.
+ '''
+ return value
+
+ def get_vars(self):
+ all_vars = dict()
+ if self._parent:
+ all_vars |= self._parent.get_vars()
+
+ all_vars |= self.vars
+
+ if 'tags' in all_vars:
+ del all_vars['tags']
+ if 'when' in all_vars:
+ del all_vars['when']
+
+ return all_vars
+
+ def get_include_params(self):
+ all_vars = dict()
+ if self._parent:
+ all_vars |= self._parent.get_include_params()
+ if self.action in C._ACTION_ALL_INCLUDES:
+ all_vars |= self.vars
+ return all_vars
+
+ def copy(self, exclude_parent=False, exclude_tasks=False):
+ new_me = super(Task, self).copy()
+
+ new_me._parent = None
+ if self._parent and not exclude_parent:
+ new_me._parent = self._parent.copy(exclude_tasks=exclude_tasks)
+
+ new_me._role = None
+ if self._role:
+ new_me._role = self._role
+
+ new_me.implicit = self.implicit
+ new_me.resolved_action = self.resolved_action
+ new_me._uuid = self._uuid
+
+ return new_me
+
+ def serialize(self):
+ data = super(Task, self).serialize()
+
+ if not self._squashed and not self._finalized:
+ if self._parent:
+ data['parent'] = self._parent.serialize()
+ data['parent_type'] = self._parent.__class__.__name__
+
+ if self._role:
+ data['role'] = self._role.serialize()
+
+ data['implicit'] = self.implicit
+ data['resolved_action'] = self.resolved_action
+
+ return data
+
+ def deserialize(self, data):
+
+ # import is here to avoid import loops
+ from ansible.playbook.task_include import TaskInclude
+ from ansible.playbook.handler_task_include import HandlerTaskInclude
+
+ parent_data = data.get('parent', None)
+ if parent_data:
+ parent_type = data.get('parent_type')
+ if parent_type == 'Block':
+ p = Block()
+ elif parent_type == 'TaskInclude':
+ p = TaskInclude()
+ elif parent_type == 'HandlerTaskInclude':
+ p = HandlerTaskInclude()
+ p.deserialize(parent_data)
+ self._parent = p
+ del data['parent']
+
+ role_data = data.get('role')
+ if role_data:
+ r = Role()
+ r.deserialize(role_data)
+ self._role = r
+ del data['role']
+
+ self.implicit = data.get('implicit', False)
+ self.resolved_action = data.get('resolved_action')
+
+ super(Task, self).deserialize(data)
+
+ def set_loader(self, loader):
+ '''
+ Sets the loader on this object and recursively on parent, child objects.
+ This is used primarily after the Task has been serialized/deserialized, which
+ does not preserve the loader.
+ '''
+
+ self._loader = loader
+
+ if self._parent:
+ self._parent.set_loader(loader)
+
+ def _get_parent_attribute(self, attr, omit=False):
+ '''
+ Generic logic to get the attribute or parent attribute for a task value.
+ '''
+ fattr = self.fattributes[attr]
+
+ extend = fattr.extend
+ prepend = fattr.prepend
+
+ try:
+ # omit self, and only get parent values
+ if omit:
+ value = Sentinel
+ else:
+ value = getattr(self, f'_{attr}', Sentinel)
+
+ # If parent is static, we can grab attrs from the parent
+ # otherwise, defer to the grandparent
+ if getattr(self._parent, 'statically_loaded', True):
+ _parent = self._parent
+ else:
+ _parent = self._parent._parent
+
+ if _parent and (value is Sentinel or extend):
+ if getattr(_parent, 'statically_loaded', True):
+ # vars are always inheritable, other attributes might not be for the parent but still should be for other ancestors
+ if attr != 'vars' and hasattr(_parent, '_get_parent_attribute'):
+ parent_value = _parent._get_parent_attribute(attr)
+ else:
+ parent_value = getattr(_parent, f'_{attr}', Sentinel)
+
+ if extend:
+ value = self._extend_value(value, parent_value, prepend)
+ else:
+ value = parent_value
+ except KeyError:
+ pass
+
+ return value
+
+ def all_parents_static(self):
+ if self._parent:
+ return self._parent.all_parents_static()
+ return True
+
+ def get_first_parent_include(self):
+ from ansible.playbook.task_include import TaskInclude
+ if self._parent:
+ if isinstance(self._parent, TaskInclude):
+ return self._parent
+ return self._parent.get_first_parent_include()
+ return None
diff --git a/lib/ansible/playbook/task_include.py b/lib/ansible/playbook/task_include.py
new file mode 100644
index 0000000..9c335c6
--- /dev/null
+++ b/lib/ansible/playbook/task_include.py
@@ -0,0 +1,150 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.constants as C
+from ansible.errors import AnsibleParserError
+from ansible.playbook.block import Block
+from ansible.playbook.task import Task
+from ansible.utils.display import Display
+from ansible.utils.sentinel import Sentinel
+
+__all__ = ['TaskInclude']
+
+display = Display()
+
+
+class TaskInclude(Task):
+
+ """
+ A task include is derived from a regular task to handle the special
+ circumstances related to the `- include: ...` task.
+ """
+
+ BASE = frozenset(('file', '_raw_params')) # directly assigned
+ OTHER_ARGS = frozenset(('apply',)) # assigned to matching property
+ VALID_ARGS = BASE.union(OTHER_ARGS) # all valid args
+ VALID_INCLUDE_KEYWORDS = frozenset(('action', 'args', 'collections', 'debugger', 'ignore_errors', 'loop', 'loop_control',
+ 'loop_with', 'name', 'no_log', 'register', 'run_once', 'tags', 'timeout', 'vars',
+ 'when'))
+
+ def __init__(self, block=None, role=None, task_include=None):
+ super(TaskInclude, self).__init__(block=block, role=role, task_include=task_include)
+ self.statically_loaded = False
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ ti = TaskInclude(block=block, role=role, task_include=task_include)
+ task = ti.check_options(
+ ti.load_data(data, variable_manager=variable_manager, loader=loader),
+ data
+ )
+
+ return task
+
+ def check_options(self, task, data):
+ '''
+ Method for options validation to use in 'load_data' for TaskInclude and HandlerTaskInclude
+ since they share the same validations. It is not named 'validate_options' on purpose
+        to prevent confusion with '_validate_*' methods. Note that the task passed in might be changed
+ as a side-effect of this method.
+ '''
+ my_arg_names = frozenset(task.args.keys())
+
+ # validate bad args, otherwise we silently ignore
+ bad_opts = my_arg_names.difference(self.VALID_ARGS)
+ if bad_opts and task.action in C._ACTION_ALL_PROPER_INCLUDE_IMPORT_TASKS:
+ raise AnsibleParserError('Invalid options for %s: %s' % (task.action, ','.join(list(bad_opts))), obj=data)
+
+ if not task.args.get('_raw_params'):
+ task.args['_raw_params'] = task.args.pop('file', None)
+ if not task.args['_raw_params']:
+ raise AnsibleParserError('No file specified for %s' % task.action)
+
+ apply_attrs = task.args.get('apply', {})
+ if apply_attrs and task.action not in C._ACTION_INCLUDE_TASKS:
+ raise AnsibleParserError('Invalid options for %s: apply' % task.action, obj=data)
+ elif not isinstance(apply_attrs, dict):
+ raise AnsibleParserError('Expected a dict for apply but got %s instead' % type(apply_attrs), obj=data)
+
+ return task
+
+ def preprocess_data(self, ds):
+ ds = super(TaskInclude, self).preprocess_data(ds)
+
+ diff = set(ds.keys()).difference(self.VALID_INCLUDE_KEYWORDS)
+ for k in diff:
+ # This check doesn't handle ``include`` as we have no idea at this point if it is static or not
+ if ds[k] is not Sentinel and ds['action'] in C._ACTION_ALL_INCLUDE_ROLE_TASKS:
+ if C.INVALID_TASK_ATTRIBUTE_FAILED:
+ raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (k, self.__class__.__name__), obj=ds)
+ else:
+ display.warning("Ignoring invalid attribute: %s" % k)
+
+ return ds
+
+ def copy(self, exclude_parent=False, exclude_tasks=False):
+ new_me = super(TaskInclude, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
+ new_me.statically_loaded = self.statically_loaded
+ return new_me
+
+ def get_vars(self):
+ '''
+ We override the parent Task() classes get_vars here because
+ we need to include the args of the include into the vars as
+ they are params to the included tasks. But ONLY for 'include'
+ '''
+ if self.action not in C._ACTION_INCLUDE:
+ all_vars = super(TaskInclude, self).get_vars()
+ else:
+ all_vars = dict()
+ if self._parent:
+ all_vars |= self._parent.get_vars()
+
+ all_vars |= self.vars
+ all_vars |= self.args
+
+ if 'tags' in all_vars:
+ del all_vars['tags']
+ if 'when' in all_vars:
+ del all_vars['when']
+
+ return all_vars
+
+ def build_parent_block(self):
+ '''
+ This method is used to create the parent block for the included tasks
+ when ``apply`` is specified
+ '''
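+        # Illustrative input: for an include like
+        #   - include_tasks:
+        #       file: other.yml
+        #       apply:
+        #         tags: [always]
+        # the 'apply' mapping becomes the parent Block wrapping the included tasks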
+ apply_attrs = self.args.pop('apply', {})
+ if apply_attrs:
+ apply_attrs['block'] = []
+ p_block = Block.load(
+ apply_attrs,
+ play=self._parent._play,
+ task_include=self,
+ role=self._role,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ )
+ else:
+ p_block = self
+
+ return p_block
diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py
new file mode 100644
index 0000000..4d1f3b1
--- /dev/null
+++ b/lib/ansible/plugins/__init__.py
@@ -0,0 +1,143 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
+# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import ABC
+
+import types
+import typing as t
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import string_types
+from ansible.utils.display import Display
+
+display = Display()
+
+if t.TYPE_CHECKING:
+ from .loader import PluginPathContext
+
+# Global so that all instances of a PluginLoader will share the caches
+MODULE_CACHE = {} # type: dict[str, dict[str, types.ModuleType]]
+PATH_CACHE = {} # type: dict[str, list[PluginPathContext] | None]
+PLUGIN_PATH_CACHE = {} # type: dict[str, dict[str, dict[str, PluginPathContext]]]
+
+
+def get_plugin_class(obj):
+ if isinstance(obj, string_types):
+ return obj.lower().replace('module', '')
+ else:
+ return obj.__class__.__name__.lower().replace('module', '')
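+
+# e.g. get_plugin_class('ActionModule') -> 'action';
+# get_plugin_class(some_connection_instance) -> 'connection'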
+
+
+class AnsiblePlugin(ABC):
+
+ # allow extra passthrough parameters
+ allow_extras = False
+
+ def __init__(self):
+ self._options = {}
+ self._defs = None
+
+ def matches_name(self, possible_names):
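+        # returns True if any of 'possible_names' resolves to one of this
+        # plugin's aliases; short names are treated as ansible.builtin, and
+        # an 'ansible.legacy.x' name also matches its plain short form 'x'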
+ possible_fqcns = set()
+ for name in possible_names:
+ if '.' not in name:
+ possible_fqcns.add(f"ansible.builtin.{name}")
+ elif name.startswith("ansible.legacy."):
+ possible_fqcns.add(name.removeprefix("ansible.legacy."))
+ possible_fqcns.add(name)
+ return bool(possible_fqcns.intersection(set(self.ansible_aliases)))
+
+ def get_option(self, option, hostvars=None):
+ if option not in self._options:
+ try:
+ option_value = C.config.get_config_value(option, plugin_type=self.plugin_type, plugin_name=self._load_name, variables=hostvars)
+ except AnsibleError as e:
+ raise KeyError(to_native(e))
+ self.set_option(option, option_value)
+ return self._options.get(option)
+
+ def get_options(self, hostvars=None):
+ options = {}
+ for option in self.option_definitions.keys():
+ options[option] = self.get_option(option, hostvars=hostvars)
+ return options
+
+ def set_option(self, option, value):
+ self._options[option] = value
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ '''
+ Sets the _options attribute with the configuration/keyword information for this plugin
+
+ :arg task_keys: Dict with playbook keywords that affect this option
+        :arg var_options: Dict with option values sourced from variables (e.g. connection variables)
+        :arg direct: Dict with directly assigned option values
+ '''
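+        # note: when the config manager resolves the options, 'direct'
+        # assignments take the highest precedence, then variables, then task keywords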
+ self._options = C.config.get_plugin_options(self.plugin_type, self._load_name, keys=task_keys, variables=var_options, direct=direct)
+
+ # allow extras/wildcards from vars that are not directly consumed in configuration
+ # this is needed to support things like winrm that can have extended protocol options we don't directly handle
+ if self.allow_extras and var_options and '_extras' in var_options:
+ self.set_option('_extras', var_options['_extras'])
+
+ def has_option(self, option):
+ if not self._options:
+ self.set_options()
+ return option in self._options
+
+ @property
+ def plugin_type(self):
+ return self.__class__.__name__.lower().replace('module', '')
+
+ @property
+ def option_definitions(self):
+ if self._defs is None:
+ self._defs = C.config.get_configuration_definitions(plugin_type=self.plugin_type, name=self._load_name)
+ return self._defs
+
+ def _check_required(self):
+ # FIXME: standardize required check based on config
+ pass
+
+
+class AnsibleJinja2Plugin(AnsiblePlugin):
+
+ def __init__(self, function):
+
+ super(AnsibleJinja2Plugin, self).__init__()
+ self._function = function
+
+ @property
+ def plugin_type(self):
+ return self.__class__.__name__.lower().replace('ansiblejinja2', '')
+
+ def _no_options(self, *args, **kwargs):
+ raise NotImplementedError()
+
+ has_option = get_option = get_options = option_definitions = set_option = set_options = _no_options
+
+ @property
+ def j2_function(self):
+ return self._function
diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py
new file mode 100644
index 0000000..d199207
--- /dev/null
+++ b/lib/ansible/plugins/action/__init__.py
@@ -0,0 +1,1431 @@
+# coding: utf-8
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import json
+import os
+import random
+import re
+import shlex
+import stat
+import tempfile
+
+from abc import ABC, abstractmethod
+from collections.abc import Sequence
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleActionSkip, AnsibleActionFail, AnsibleAuthenticationFailure
+from ansible.executor.module_common import modify_module
+from ansible.executor.interpreter_discovery import discover_interpreter, InterpreterDiscoveryRequiredError
+from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
+from ansible.module_utils.errors import UnsupportedError
+from ansible.module_utils.json_utils import _filter_non_json_lines
+from ansible.module_utils.six import binary_type, string_types, text_type
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.parsing.utils.jsonify import jsonify
+from ansible.release import __version__
+from ansible.utils.collection_loader import resource_from_fqcr
+from ansible.utils.display import Display
+from ansible.utils.unsafe_proxy import wrap_var, AnsibleUnsafeText
+from ansible.vars.clean import remove_internal_keys
+from ansible.utils.plugin_docs import get_versioned_doclink
+
+display = Display()
+
+
+class ActionBase(ABC):
+
+ '''
+ This class is the base class for all action plugins, and defines
+ code common to all actions. The base class handles the connection
+ by putting/getting files and executing commands based on the current
+ action in use.
+ '''
+
+ # A set of valid arguments
+ _VALID_ARGS = frozenset([]) # type: frozenset[str]
+
+ def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
+ self._task = task
+ self._connection = connection
+ self._play_context = play_context
+ self._loader = loader
+ self._templar = templar
+ self._shared_loader_obj = shared_loader_obj
+ self._cleanup_remote_tmp = False
+
+ self._supports_check_mode = True
+ self._supports_async = False
+
+ # interpreter discovery state
+ self._discovered_interpreter_key = None
+ self._discovered_interpreter = False
+ self._discovery_deprecation_warnings = []
+ self._discovery_warnings = []
+
+ # Backwards compat: self._display isn't really needed, just import the global display and use that.
+ self._display = display
+
+ self._used_interpreter = None
+
+ @abstractmethod
+ def run(self, tmp=None, task_vars=None):
+ """ Action Plugins should implement this method to perform their
+ tasks. Everything else in this base class is a helper method for the
+ action plugin to do that.
+
+ :kwarg tmp: Deprecated parameter. This is no longer used. An action plugin that calls
+ another one and wants to use the same remote tmp for both should set
+ self._connection._shell.tmpdir rather than this parameter.
+ :kwarg task_vars: The variables (host vars, group vars, config vars,
+ etc) associated with this task.
+ :returns: dictionary of results from the module
+
+ Implementors of action modules may find the following variables especially useful:
+
+ * Module parameters. These are stored in self._task.args
+ """
+
+ # does not default to {'changed': False, 'failed': False}, as it breaks async
+ result = {}
+
+ if tmp is not None:
+ result['warning'] = ['ActionModule.run() no longer honors the tmp parameter. Action'
+ ' plugins should set self._connection._shell.tmpdir to share'
+ ' the tmpdir']
+ del tmp
+
+ if self._task.async_val and not self._supports_async:
+ raise AnsibleActionFail('async is not supported for this task.')
+ elif self._task.check_mode and not self._supports_check_mode:
+ raise AnsibleActionSkip('check mode is not supported for this task.')
+ elif self._task.async_val and self._task.check_mode:
+ raise AnsibleActionFail('check mode and async cannot be used on same task.')
+
+ # Error if invalid argument is passed
+ if self._VALID_ARGS:
+ task_opts = frozenset(self._task.args.keys())
+ bad_opts = task_opts.difference(self._VALID_ARGS)
+ if bad_opts:
+ raise AnsibleActionFail('Invalid options for %s: %s' % (self._task.action, ','.join(list(bad_opts))))
+
+ if self._connection._shell.tmpdir is None and self._early_needs_tmp_path():
+ self._make_tmp_path()
+
+ return result
+
+ def validate_argument_spec(self, argument_spec=None,
+ mutually_exclusive=None,
+ required_together=None,
+ required_one_of=None,
+ required_if=None,
+ required_by=None,
+ ):
+ """Validate an argument spec against the task args
+
+ This will return a tuple of (ValidationResult, dict) where the dict
+ is the validated, coerced, and normalized task args.
+
+        Be cautious when passing ``new_module_args`` directly to a
+        module invocation, as it will contain the defaults, and not only
+        the args supplied from the task. If you do this, the module
+        should not define ``mutually_exclusive`` or similar.
+
+ This code is roughly copied from the ``validate_argument_spec``
+ action plugin for use by other action plugins.
+ """
+
+ new_module_args = self._task.args.copy()
+
+ validator = ArgumentSpecValidator(
+ argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_together=required_together,
+ required_one_of=required_one_of,
+ required_if=required_if,
+ required_by=required_by,
+ )
+ validation_result = validator.validate(new_module_args)
+
+ new_module_args.update(validation_result.validated_parameters)
+
+ try:
+ error = validation_result.errors[0]
+ except IndexError:
+ error = None
+
+ # Fail for validation errors, even in check mode
+ if error:
+ msg = validation_result.errors.msg
+ if isinstance(error, UnsupportedError):
+ msg = f"Unsupported parameters for ({self._load_name}) module: {msg}"
+
+ raise AnsibleActionFail(msg)
+
+ return validation_result, new_module_args
+
+ def cleanup(self, force=False):
+ """Method to perform a clean up at the end of an action plugin execution
+
+ By default this is designed to clean up the shell tmpdir, and is toggled based on whether
+ async is in use
+
+ Action plugins may override this if they deem necessary, but should still call this method
+ via super
+ """
+ if force or not self._task.async_val:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ def get_plugin_option(self, plugin, option, default=None):
+ """Helper to get an option from a plugin without having to use
+ the try/except dance everywhere to set a default
+ """
+ try:
+ return plugin.get_option(option)
+ except (AttributeError, KeyError):
+ return default
+
+ def get_become_option(self, option, default=None):
+ return self.get_plugin_option(self._connection.become, option, default=default)
+
+ def get_connection_option(self, option, default=None):
+ return self.get_plugin_option(self._connection, option, default=default)
+
+ def get_shell_option(self, option, default=None):
+ return self.get_plugin_option(self._connection._shell, option, default=default)
+
+ def _remote_file_exists(self, path):
+ cmd = self._connection._shell.exists(path)
+ result = self._low_level_execute_command(cmd=cmd, sudoable=True)
+ if result['rc'] == 0:
+ return True
+ return False
+
+ def _configure_module(self, module_name, module_args, task_vars):
+ '''
+ Handles the loading and templating of the module code through the
+ modify_module() function.
+ '''
+ if self._task.delegate_to:
+ use_vars = task_vars.get('ansible_delegated_vars')[self._task.delegate_to]
+ else:
+ use_vars = task_vars
+
+ split_module_name = module_name.split('.')
+ collection_name = '.'.join(split_module_name[0:2]) if len(split_module_name) > 2 else ''
+ leaf_module_name = resource_from_fqcr(module_name)
+
+ # Search module path(s) for named module.
+ for mod_type in self._connection.module_implementation_preferences:
+ # Check to determine if PowerShell modules are supported, and apply
+ # some fixes (hacks) to module name + args.
+ if mod_type == '.ps1':
+ # FIXME: This should be temporary and moved to an exec subsystem plugin where we can define the mapping
+ # for each subsystem.
+ win_collection = 'ansible.windows'
+ rewrite_collection_names = ['ansible.builtin', 'ansible.legacy', '']
+                # async_status, win_stat, win_file, win_copy, and win_ping are not exact ports of their
+                # python counterparts, but they are compatible enough for our
+                # internal usage
+ # NB: we only rewrite the module if it's not being called by the user (eg, an action calling something else)
+ # and if it's unqualified or FQ to a builtin
+ if leaf_module_name in ('stat', 'file', 'copy', 'ping') and \
+ collection_name in rewrite_collection_names and self._task.action != module_name:
+ module_name = '%s.win_%s' % (win_collection, leaf_module_name)
+ elif leaf_module_name == 'async_status' and collection_name in rewrite_collection_names:
+ module_name = '%s.%s' % (win_collection, leaf_module_name)
+
+ # TODO: move this tweak down to the modules, not extensible here
+ # Remove extra quotes surrounding path parameters before sending to module.
+ if leaf_module_name in ['win_stat', 'win_file', 'win_copy', 'slurp'] and module_args and \
+ hasattr(self._connection._shell, '_unquote'):
+ for key in ('src', 'dest', 'path'):
+ if key in module_args:
+ module_args[key] = self._connection._shell._unquote(module_args[key])
+
+ result = self._shared_loader_obj.module_loader.find_plugin_with_context(module_name, mod_type, collection_list=self._task.collections)
+
+ if not result.resolved:
+ if result.redirect_list and len(result.redirect_list) > 1:
+ # take the last one in the redirect list, we may have successfully jumped through N other redirects
+ target_module_name = result.redirect_list[-1]
+
+ raise AnsibleError("The module {0} was redirected to {1}, which could not be loaded.".format(module_name, target_module_name))
+
+ module_path = result.plugin_resolved_path
+ if module_path:
+ break
+ else: # This is a for-else: http://bit.ly/1ElPkyg
+ raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
+
+ # insert shared code and arguments into the module
+ final_environment = dict()
+ self._compute_environment_string(final_environment)
+
+ become_kwargs = {}
+ if self._connection.become:
+ become_kwargs['become'] = True
+ become_kwargs['become_method'] = self._connection.become.name
+ become_kwargs['become_user'] = self._connection.become.get_option('become_user',
+ playcontext=self._play_context)
+ become_kwargs['become_password'] = self._connection.become.get_option('become_pass',
+ playcontext=self._play_context)
+ become_kwargs['become_flags'] = self._connection.become.get_option('become_flags',
+ playcontext=self._play_context)
+
+ # modify_module will exit early if interpreter discovery is required; re-run after if necessary
+ for dummy in (1, 2):
+ try:
+ (module_data, module_style, module_shebang) = modify_module(module_name, module_path, module_args, self._templar,
+ task_vars=use_vars,
+ module_compression=self._play_context.module_compression,
+ async_timeout=self._task.async_val,
+ environment=final_environment,
+ remote_is_local=bool(getattr(self._connection, '_remote_is_local', False)),
+ **become_kwargs)
+ break
+ except InterpreterDiscoveryRequiredError as idre:
+ self._discovered_interpreter = AnsibleUnsafeText(discover_interpreter(
+ action=self,
+ interpreter_name=idre.interpreter_name,
+ discovery_mode=idre.discovery_mode,
+ task_vars=use_vars))
+
+ # update the local task_vars with the discovered interpreter (which might be None);
+ # we'll propagate back to the controller in the task result
+ discovered_key = 'discovered_interpreter_%s' % idre.interpreter_name
+
+ # update the local vars copy for the retry
+ use_vars['ansible_facts'][discovered_key] = self._discovered_interpreter
+
+ # TODO: this condition prevents 'wrong host' from being updated
+ # but in future we would want to be able to update 'delegated host facts'
+ # irrespective of task settings
+ if not self._task.delegate_to or self._task.delegate_facts:
+ # store in local task_vars facts collection for the retry and any other usages in this worker
+ task_vars['ansible_facts'][discovered_key] = self._discovered_interpreter
+ # preserve this so _execute_module can propagate back to controller as a fact
+ self._discovered_interpreter_key = discovered_key
+ else:
+ task_vars['ansible_delegated_vars'][self._task.delegate_to]['ansible_facts'][discovered_key] = self._discovered_interpreter
+
+ return (module_style, module_shebang, module_data, module_path)
+
+ def _compute_environment_string(self, raw_environment_out=None):
+ '''
+ Builds the environment string to be used when executing the remote task.
+ '''
+
+ final_environment = dict()
+ if self._task.environment is not None:
+ environments = self._task.environment
+ if not isinstance(environments, list):
+ environments = [environments]
+
+ # The order of environments matters to make sure we merge
+ # in the parent's values first so those in the block then
+ # task 'win' in precedence
+ for environment in environments:
+ if environment is None or len(environment) == 0:
+ continue
+ temp_environment = self._templar.template(environment)
+ if not isinstance(temp_environment, dict):
+ raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
+ # very deliberately using update here instead of combine_vars, as
+ # these environment settings should not need to merge sub-dicts
+ final_environment.update(temp_environment)
+
+ if len(final_environment) > 0:
+ final_environment = self._templar.template(final_environment)
+
+ if isinstance(raw_environment_out, dict):
+ raw_environment_out.clear()
+ raw_environment_out.update(final_environment)
+
+ return self._connection._shell.env_prefix(**final_environment)
+
+ def _early_needs_tmp_path(self):
+ '''
+ Determines if a tmp path should be created before the action is executed.
+ '''
+
+ return getattr(self, 'TRANSFERS_FILES', False)
+
+ def _is_pipelining_enabled(self, module_style, wrap_async=False):
+ '''
+        Determines whether we are required to, and are able to, use pipelining
+ '''
+
+ try:
+ is_enabled = self._connection.get_option('pipelining')
+ except (KeyError, AttributeError, ValueError):
+ is_enabled = self._play_context.pipelining
+
+ # winrm supports async pipeline
+ # TODO: make other class property 'has_async_pipelining' to separate cases
+ always_pipeline = self._connection.always_pipeline_modules
+
+ # su does not work with pipelining
+ # TODO: add has_pipelining class prop to become plugins
+ become_exception = (self._connection.become.name if self._connection.become else '') != 'su'
+
+        # all of the following must be true for pipelining to be used
+ conditions = [
+ self._connection.has_pipelining, # connection class supports it
+ is_enabled or always_pipeline, # enabled via config or forced via connection (eg winrm)
+ module_style == "new", # old style modules do not support pipelining
+ not C.DEFAULT_KEEP_REMOTE_FILES, # user wants remote files
+ not wrap_async or always_pipeline, # async does not normally support pipelining unless it does (eg winrm)
+ become_exception,
+ ]
+
+ return all(conditions)
+
+ def _get_admin_users(self):
+ '''
+ Returns a list of admin users that are configured for the current shell
+ plugin
+ '''
+
+ return self.get_shell_option('admin_users', ['root'])
+
+ def _get_remote_addr(self, tvars):
+ ''' consistently get the 'remote_address' for the action plugin '''
+ remote_addr = tvars.get('delegated_vars', {}).get('ansible_host', tvars.get('ansible_host', tvars.get('inventory_hostname', None)))
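+        # prefer the connection plugin's own option; the for-else below falls
+        # back to play_context only if the plugin defines neither variation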
+ for variation in ('remote_addr', 'host'):
+ try:
+ remote_addr = self._connection.get_option(variation)
+ except KeyError:
+ continue
+ break
+ else:
+ # plugin does not have, fallback to play_context
+ remote_addr = self._play_context.remote_addr
+ return remote_addr
+
+ def _get_remote_user(self):
+ ''' consistently get the 'remote_user' for the action plugin '''
+ # TODO: use 'current user running ansible' as fallback when moving away from play_context
+ # pwd.getpwuid(os.getuid()).pw_name
+ remote_user = None
+ try:
+ remote_user = self._connection.get_option('remote_user')
+ except KeyError:
+            # plugin does not have remote_user option, fall back to default and/or play_context
+ remote_user = getattr(self._connection, 'default_user', None) or self._play_context.remote_user
+ except AttributeError:
+ # plugin does not use config system, fallback to old play_context
+ remote_user = self._play_context.remote_user
+ return remote_user
+
+ def _is_become_unprivileged(self):
+ '''
+        Returns True when the become user is not the connection user and is
+        not among the shell plugin's configured admin users
+ '''
+ # if we don't use become then we know we aren't switching to a
+ # different unprivileged user
+ if not self._connection.become:
+ return False
+
+ # if we use become and the user is not an admin (or same user) then
+ # we need to return become_unprivileged as True
+ admin_users = self._get_admin_users()
+ remote_user = self._get_remote_user()
+ become_user = self.get_become_option('become_user')
+ return bool(become_user and become_user not in admin_users + [remote_user])
+
+ def _make_tmp_path(self, remote_user=None):
+ '''
+ Create and return a temporary path on a remote box.
+ '''
+
+ # Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
+ # As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
+ # This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
+ if getattr(self._connection, '_remote_is_local', False):
+ tmpdir = C.DEFAULT_LOCAL_TMP
+ else:
+            # NOTE: shell plugins should populate this setting anyway, but they don't do remote expansion, which
+ # we need for 'non posix' systems like cloud-init and solaris
+ tmpdir = self._remote_expand_user(self.get_shell_option('remote_tmp', default='~/.ansible/tmp'), sudoable=False)
+
+ become_unprivileged = self._is_become_unprivileged()
+ basefile = self._connection._shell._generate_temp_dir_name()
+ cmd = self._connection._shell.mkdtemp(basefile=basefile, system=become_unprivileged, tmpdir=tmpdir)
+ result = self._low_level_execute_command(cmd, sudoable=False)
+
+ # error handling on this seems a little aggressive?
+ if result['rc'] != 0:
+ if result['rc'] == 5:
+ output = 'Authentication failure.'
+ elif result['rc'] == 255 and self._connection.transport in ('ssh',):
+
+ if display.verbosity > 3:
+ output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr'])
+ else:
+ output = (u'SSH encountered an unknown error during the connection. '
+ 'We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue')
+
+ elif u'No space left on device' in result['stderr']:
+ output = result['stderr']
+ else:
+ output = ('Failed to create temporary directory. '
+ 'In some cases, you may have been able to authenticate and did not have permissions on the target directory. '
+ 'Consider changing the remote tmp path in ansible.cfg to a path rooted in "/tmp", for more error information use -vvv. '
+ 'Failed command was: %s, exited with result %d' % (cmd, result['rc']))
+ if 'stdout' in result and result['stdout'] != u'':
+ output = output + u", stdout output: %s" % result['stdout']
+ if display.verbosity > 3 and 'stderr' in result and result['stderr'] != u'':
+ output += u", stderr output: %s" % result['stderr']
+ raise AnsibleConnectionFailure(output)
+ else:
+ self._cleanup_remote_tmp = True
+
+ try:
+ stdout_parts = result['stdout'].strip().split('%s=' % basefile, 1)
+ rc = self._connection._shell.join_path(stdout_parts[-1], u'').splitlines()[-1]
+ except IndexError:
+ # stdout was empty or just space, set to / to trigger error in next if
+ rc = '/'
+
+ # Catch failure conditions, files should never be
+ # written to locations in /.
+ if rc == '/':
+ raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
+
+ self._connection._shell.tmpdir = rc
+
+ return rc
+
+ def _should_remove_tmp_path(self, tmp_path):
+ '''Determine if temporary path should be deleted or kept by user request/config'''
+ return tmp_path and self._cleanup_remote_tmp and not C.DEFAULT_KEEP_REMOTE_FILES and "-tmp-" in tmp_path
+
+ def _remove_tmp_path(self, tmp_path, force=False):
+ '''Remove a temporary path we created. '''
+
+ if tmp_path is None and self._connection._shell.tmpdir:
+ tmp_path = self._connection._shell.tmpdir
+
+ if force or self._should_remove_tmp_path(tmp_path):
+ cmd = self._connection._shell.remove(tmp_path, recurse=True)
+ # If we have gotten here we have a working connection configuration.
+ # If the connection breaks we could leave tmp directories out on the remote system.
+ tmp_rm_res = self._low_level_execute_command(cmd, sudoable=False)
+
+ if tmp_rm_res.get('rc', 0) != 0:
+                display.warning('Error deleting remote temporary files (rc: %s, stderr: %s)'
+ % (tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.')))
+ else:
+ self._connection._shell.tmpdir = None
+
+ def _transfer_file(self, local_path, remote_path):
+ """
+ Copy a file from the controller to a remote path
+
+ :arg local_path: Path on controller to transfer
+ :arg remote_path: Path on the remote system to transfer into
+
+ .. warning::
+            * When you use this function you likely want to use fixup_perms2() on the
+ remote_path to make sure that the remote file is readable when the user becomes
+ a non-privileged user.
+ * If you use fixup_perms2() on the file and copy or move the file into place, you will
+ need to then remove filesystem acls on the file once it has been copied into place by
+ the module. See how the copy module implements this for help.
+ """
+ self._connection.put_file(local_path, remote_path)
+ return remote_path
+
+ def _transfer_data(self, remote_path, data):
+ '''
+ Copies the module data out to the temporary module path.
+ '''
+
+ if isinstance(data, dict):
+ data = jsonify(data)
+
+ afd, afile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
+ afo = os.fdopen(afd, 'wb')
+ try:
+ data = to_bytes(data, errors='surrogate_or_strict')
+ afo.write(data)
+ except Exception as e:
+ raise AnsibleError("failure writing module data to temporary file for transfer: %s" % to_native(e))
+
+ afo.flush()
+ afo.close()
+
+ try:
+ self._transfer_file(afile, remote_path)
+ finally:
+ os.unlink(afile)
+
+ return remote_path
+
+ def _fixup_perms2(self, remote_paths, remote_user=None, execute=True):
+ """
+ We need the files we upload to be readable (and sometimes executable)
+ by the user being sudo'd to but we want to limit other people's access
+        (because the files could contain passwords or other private
+        information). We achieve this in one of these ways:
+
+ * If no sudo is performed or the remote_user is sudo'ing to
+ themselves, we don't have to change permissions.
+ * If the remote_user sudo's to a privileged user (for instance, root),
+ we don't have to change permissions
+ * If the remote_user sudo's to an unprivileged user then we attempt to
+ grant the unprivileged user access via file system acls.
+ * If granting file system acls fails we try to change the owner of the
+ file with chown which only works in case the remote_user is
+ privileged or the remote systems allows chown calls by unprivileged
+ users (e.g. HP-UX)
+ * If the above fails, we next try 'chmod +a' which is a macOS way of
+ setting ACLs on files.
+ * If the above fails, we check if ansible_common_remote_group is set.
+ If it is, we attempt to chgrp the file to its value. This is useful
+ if the remote_user has a group in common with the become_user. As the
+ remote_user, we can chgrp the file to that group and allow the
+ become_user to read it.
+ * If (the chown fails AND ansible_common_remote_group is not set) OR
+ (ansible_common_remote_group is set AND the chgrp (or following chmod)
+ returned non-zero), we can set the file to be world readable so that
+ the second unprivileged user can read the file.
+ Since this could allow other users to get access to private
+ information we only do this if ansible is configured with
+ "allow_world_readable_tmpfiles" in the ansible.cfg. Also note that
+ when ansible_common_remote_group is set this final fallback is very
+ unlikely to ever be triggered, so long as chgrp was successful. But
+ just because the chgrp was successful, does not mean Ansible can
+ necessarily access the files (if, for example, the variable was set
+ to a group that remote_user is in, and can chgrp to, but does not have
+ in common with become_user).
+ """
+ if remote_user is None:
+ remote_user = self._get_remote_user()
+
+ # Step 1: Are we on windows?
+ if getattr(self._connection._shell, "_IS_WINDOWS", False):
+ # This won't work on Powershell as-is, so we'll just completely
+ # skip until we have a need for it, at which point we'll have to do
+ # something different.
+ return remote_paths
+
+ # Step 2: If we're not becoming an unprivileged user, we are roughly
+ # done. Make the files +x if we're asked to, and return.
+ if not self._is_become_unprivileged():
+ if execute:
+ # Can't depend on the file being transferred with execute permissions.
+ # Only need user perms because no become was used here
+ res = self._remote_chmod(remote_paths, 'u+x')
+ if res['rc'] != 0:
+ raise AnsibleError(
+ 'Failed to set execute bit on remote files '
+ '(rc: {0}, err: {1})'.format(
+ res['rc'],
+ to_native(res['stderr'])))
+ return remote_paths
+
+ # If we're still here, we have an unprivileged user that's different
+ # than the ssh user.
+ become_user = self.get_become_option('become_user')
+
+ # Try to use file system acls to make the files readable for sudo'd
+ # user
+ if execute:
+ chmod_mode = 'rx'
+ setfacl_mode = 'r-x'
+ # Apple patches their "file_cmds" chmod with ACL support
+ chmod_acl_mode = '{0} allow read,execute'.format(become_user)
+ # POSIX-draft ACL specification. Solaris, maybe others.
+ # See chmod(1) on something Solaris-based for syntax details.
+ posix_acl_mode = 'A+user:{0}:rx:allow'.format(become_user)
+ else:
+ chmod_mode = 'rX'
+ # TODO: this form fails silently on freebsd. We currently
+ # never call _fixup_perms2() with execute=False but if we
+ # start to we'll have to fix this.
+ setfacl_mode = 'r-X'
+ # Apple
+ chmod_acl_mode = '{0} allow read'.format(become_user)
+ # POSIX-draft
+ posix_acl_mode = 'A+user:{0}:r:allow'.format(become_user)
+
+ # Step 3a: Are we able to use setfacl to add user ACLs to the file?
+ res = self._remote_set_user_facl(
+ remote_paths,
+ become_user,
+ setfacl_mode)
+
+ if res['rc'] == 0:
+ return remote_paths
+
+ # Step 3b: Set execute if we need to. We do this before anything else
+ # because some of the methods below might work but not let us set +x
+ # as part of them.
+ if execute:
+ res = self._remote_chmod(remote_paths, 'u+x')
+ if res['rc'] != 0:
+ raise AnsibleError(
+ 'Failed to set file mode or acl on remote temporary files '
+ '(rc: {0}, err: {1})'.format(
+ res['rc'],
+ to_native(res['stderr'])))
+
+ # Step 3c: File system ACLs failed above; try falling back to chown.
+ res = self._remote_chown(remote_paths, become_user)
+ if res['rc'] == 0:
+ return remote_paths
+
+ # Check if we are an admin/root user. If we are and got here, it means
+ # we failed to chown as root and something weird has happened.
+ if remote_user in self._get_admin_users():
+ raise AnsibleError(
+ 'Failed to change ownership of the temporary files Ansible '
+                '(via chown or setfacl) needs to create despite connecting as a '
+ 'privileged user. Unprivileged become user would be unable to read'
+ ' the file.')
+
+ # Step 3d: Try macOS's special chmod + ACL
+ # macOS chmod's +a flag takes its own argument. As a slight hack, we
+ # pass that argument as the first element of remote_paths. So we end
+ # up running `chmod +a [that argument] [file 1] [file 2] ...`
+ try:
+ res = self._remote_chmod([chmod_acl_mode] + list(remote_paths), '+a')
+ except AnsibleAuthenticationFailure as e:
+ # Solaris-based chmod will return 5 when it sees an invalid mode,
+ # and +a is invalid there. Because it returns 5, which is the same
+ # thing sshpass returns on auth failure, our sshpass code will
+ # assume that auth failed. If we don't handle that case here, none
+ # of the other logic below will get run. This is fairly hacky and a
+ # corner case, but probably one that shows up pretty often in
+ # Solaris-based environments (and possibly others).
+ pass
+ else:
+ if res['rc'] == 0:
+ return remote_paths
+
+ # Step 3e: Try Solaris/OpenSolaris/OpenIndiana-sans-setfacl chmod
+ # Similar to macOS above, Solaris 11.4 drops setfacl and takes file ACLs
+ # via chmod instead. OpenSolaris and illumos-based distros allow for
+ # using either setfacl or chmod, and compatibility depends on filesystem.
+ # It should be possible to debug this branch by installing OpenIndiana
+ # (use ZFS) and going unpriv -> unpriv.
+ res = self._remote_chmod(remote_paths, posix_acl_mode)
+ if res['rc'] == 0:
+ return remote_paths
+
+ # we'll need this down here
+ become_link = get_versioned_doclink('user_guide/become.html')
+
+ # Step 3f: Common group
+ # Otherwise, we're a normal user. We failed to chown the paths to the
+ # unprivileged user, but if we have a common group with them, we should
+ # be able to chown it to that.
+ #
+ # Note that we have no way of knowing if this will actually work... just
+ # because chgrp exits successfully does not mean that Ansible will work.
+ # We could check if the become user is in the group, but this would
+ # create an extra round trip.
+ #
+ # Also note that due to the above, this can prevent the
+ # world_readable_temp logic below from ever getting called. We
+ # leave this up to the user to rectify if they have both of these
+ # features enabled.
+ group = self.get_shell_option('common_remote_group')
+ if group is not None:
+ res = self._remote_chgrp(remote_paths, group)
+ if res['rc'] == 0:
+ # warn user that something might go weirdly here.
+ if self.get_shell_option('world_readable_temp'):
+ display.warning(
+ 'Both common_remote_group and '
+ 'allow_world_readable_tmpfiles are set. chgrp was '
+ 'successful, but there is no guarantee that Ansible '
+ 'will be able to read the files after this operation, '
+ 'particularly if common_remote_group was set to a '
+ 'group of which the unprivileged become user is not a '
+ 'member. In this situation, '
+ 'allow_world_readable_tmpfiles is a no-op. See this '
+ 'URL for more details: %s'
+ '#risks-of-becoming-an-unprivileged-user' % become_link)
+ if execute:
+ group_mode = 'g+rwx'
+ else:
+ group_mode = 'g+rw'
+ res = self._remote_chmod(remote_paths, group_mode)
+ if res['rc'] == 0:
+ return remote_paths
+
+ # Step 4: World-readable temp directory
+ if self.get_shell_option('world_readable_temp'):
+ # chown and fs acls failed -- do things this insecure way only if
+ # the user opted in in the config file
+ display.warning(
+ 'Using world-readable permissions for temporary files Ansible '
+ 'needs to create when becoming an unprivileged user. This may '
+ 'be insecure. For information on securing this, see %s'
+ '#risks-of-becoming-an-unprivileged-user' % become_link)
+ res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode)
+ if res['rc'] == 0:
+ return remote_paths
+ raise AnsibleError(
+ 'Failed to set file mode on remote files '
+ '(rc: {0}, err: {1})'.format(
+ res['rc'],
+ to_native(res['stderr'])))
+
+ raise AnsibleError(
+ 'Failed to set permissions on the temporary files Ansible needs '
+ 'to create when becoming an unprivileged user '
+            '(rc: %s, err: %s). For information on working around this, see %s'
+ '#risks-of-becoming-an-unprivileged-user' % (
+ res['rc'],
+ to_native(res['stderr']), become_link))
+
+ def _remote_chmod(self, paths, mode, sudoable=False):
+ '''
+ Issue a remote chmod command
+ '''
+ cmd = self._connection._shell.chmod(paths, mode)
+ res = self._low_level_execute_command(cmd, sudoable=sudoable)
+ return res
+
+ def _remote_chown(self, paths, user, sudoable=False):
+ '''
+ Issue a remote chown command
+ '''
+ cmd = self._connection._shell.chown(paths, user)
+ res = self._low_level_execute_command(cmd, sudoable=sudoable)
+ return res
+
+ def _remote_chgrp(self, paths, group, sudoable=False):
+ '''
+ Issue a remote chgrp command
+ '''
+ cmd = self._connection._shell.chgrp(paths, group)
+ res = self._low_level_execute_command(cmd, sudoable=sudoable)
+ return res
+
+ def _remote_set_user_facl(self, paths, user, mode, sudoable=False):
+ '''
+ Issue a remote call to setfacl
+ '''
+ cmd = self._connection._shell.set_user_facl(paths, user, mode)
+ res = self._low_level_execute_command(cmd, sudoable=sudoable)
+ return res
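+ # Illustrative use of the helpers above (values hypothetical): granting an
+ # unprivileged become user read access to a tmpdir via POSIX ACLs might be
+ #   res = self._remote_set_user_facl(['/tmp/ansible-tmp-123'], 'webuser', 'r-x')
+ #   if res['rc'] != 0: ...  # fall back to chmod/chgrp as _fixup_perms2 does above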
+
+ def _execute_remote_stat(self, path, all_vars, follow, tmp=None, checksum=True):
+ '''
+ Get information from remote file.
+ '''
+ if tmp is not None:
+ display.warning('_execute_remote_stat no longer honors the tmp parameter. Action'
+ ' plugins should set self._connection._shell.tmpdir to share'
+ ' the tmpdir')
+ del tmp # No longer used
+
+ module_args = dict(
+ path=path,
+ follow=follow,
+ get_checksum=checksum,
+ checksum_algorithm='sha1',
+ )
+ mystat = self._execute_module(module_name='ansible.legacy.stat', module_args=module_args, task_vars=all_vars,
+ wrap_async=False)
+
+ if mystat.get('failed'):
+ msg = mystat.get('module_stderr')
+ if not msg:
+ msg = mystat.get('module_stdout')
+ if not msg:
+ msg = mystat.get('msg')
+ raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, msg))
+
+ if not mystat['stat']['exists']:
+ # an empty string might match a real checksum, but '1' never will; this is also backwards compatible
+ mystat['stat']['checksum'] = '1'
+
+ # 'checksum' can be absent, e.g. when the path is a directory and the host is not BSD
+ if 'checksum' not in mystat['stat']:
+ mystat['stat']['checksum'] = ''
+ elif not isinstance(mystat['stat']['checksum'], string_types):
+ raise AnsibleError("Invalid checksum returned by stat: expected a string type but got %s" % type(mystat['stat']['checksum']))
+
+ return mystat['stat']
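+ # Illustrative usage (path and values hypothetical):
+ #   st = self._execute_remote_stat('/etc/motd', all_vars=task_vars, follow=False)
+ #   if st['exists'] and not st['isdir']:
+ #       remote_sha1 = st['checksum']  # '1' when the file does not exist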
+
+ def _remote_checksum(self, path, all_vars, follow=False):
+ """Deprecated. Use _execute_remote_stat() instead.
+
+ Produces a remote checksum given a path.
+ Returns a number 0-5 for specific errors instead of a checksum, and ensures the value differs from any real checksum:
+ 0 = unknown error
+ 1 = file does not exist, this might not be an error
+ 2 = permissions issue
+ 3 = it's a directory, not a file
+ 4 = stat module failed, likely due to not finding python
+ 5 = appropriate json module not found
+ """
+ self._display.deprecated("The '_remote_checksum()' method is deprecated. "
+ "The plugin author should update the code to use '_execute_remote_stat()' instead", "2.16")
+ x = "0" # unknown error has occurred
+ try:
+ remote_stat = self._execute_remote_stat(path, all_vars, follow=follow)
+ if remote_stat['exists'] and remote_stat['isdir']:
+ x = "3" # its a directory not a file
+ else:
+ x = remote_stat['checksum'] # if 1, file is missing
+ except AnsibleError as e:
+ errormsg = to_text(e)
+ if errormsg.endswith(u'Permission denied'):
+ x = "2" # cannot read file
+ elif errormsg.endswith(u'MODULE FAILURE'):
+ x = "4" # python not found or module uncaught exception
+ elif 'json' in errormsg:
+ x = "5" # json module needed
+ finally:
+ return x # pylint: disable=lost-exception
+
+ def _remote_expand_user(self, path, sudoable=True, pathsep=None):
+ ''' takes a remote path and performs tilde/$HOME expansion on the remote host '''
+
+ # We only expand ~/path and ~username/path
+ if not path.startswith('~'):
+ return path
+
+ # Per Jborean, we don't have to worry about Windows as we don't have a notion of user's home
+ # dir there.
+ split_path = path.split(os.path.sep, 1)
+ expand_path = split_path[0]
+
+ if expand_path == '~':
+ # Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
+ # As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
+ # This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
+ become_user = self.get_become_option('become_user')
+ if getattr(self._connection, '_remote_is_local', False):
+ pass
+ elif sudoable and self._connection.become and become_user:
+ expand_path = '~%s' % become_user
+ else:
+ # use remote user instead, if none set default to current user
+ expand_path = '~%s' % (self._get_remote_user() or '')
+
+ # use shell to construct appropriate command and execute
+ cmd = self._connection._shell.expand_user(expand_path)
+ data = self._low_level_execute_command(cmd, sudoable=False)
+
+ try:
+ initial_fragment = data['stdout'].strip().splitlines()[-1]
+ except IndexError:
+ initial_fragment = None
+
+ if not initial_fragment:
+ # Something went wrong trying to expand the path remotely. Try using pwd, if not, return
+ # the original string
+ cmd = self._connection._shell.pwd()
+ pwd = self._low_level_execute_command(cmd, sudoable=False).get('stdout', '').strip()
+ if pwd:
+ expanded = pwd
+ else:
+ expanded = path
+
+ elif len(split_path) > 1:
+ expanded = self._connection._shell.join_path(initial_fragment, *split_path[1:])
+ else:
+ expanded = initial_fragment
+
+ if '..' in os.path.dirname(expanded).split('/'):
+ raise AnsibleError("'%s' returned an invalid relative home directory path containing '..'" % self._get_remote_addr({}))
+
+ return expanded
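+ # Illustrative example (user name hypothetical): with become disabled and a
+ # remote_user of 'deploy', _remote_expand_user('~/archive.tgz') expands
+ # '~deploy' on the remote host and returns e.g. '/home/deploy/archive.tgz';
+ # paths that do not start with '~' are returned unchanged.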
+
+ def _strip_success_message(self, data):
+ '''
+ Removes the BECOME-SUCCESS message from the data.
+ '''
+ if data.strip().startswith('BECOME-SUCCESS-'):
+ data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
+ return data
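+ # e.g. (illustrative): _strip_success_message('BECOME-SUCCESS-abc\nreal output\n')
+ # returns 'real output\n'; data without the marker is returned unchanged.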
+
+ def _update_module_args(self, module_name, module_args, task_vars):
+
+ # set check mode in the module arguments, if required
+ if self._task.check_mode:
+ if not self._supports_check_mode:
+ raise AnsibleError("check mode is not supported for this operation")
+ module_args['_ansible_check_mode'] = True
+ else:
+ module_args['_ansible_check_mode'] = False
+
+ # set no log in the module arguments, if required
+ no_target_syslog = C.config.get_config_value('DEFAULT_NO_TARGET_SYSLOG', variables=task_vars)
+ module_args['_ansible_no_log'] = self._task.no_log or no_target_syslog
+
+ # set debug in the module arguments, if required
+ module_args['_ansible_debug'] = C.DEFAULT_DEBUG
+
+ # let module know we are in diff mode
+ module_args['_ansible_diff'] = self._task.diff
+
+ # let module know our verbosity
+ module_args['_ansible_verbosity'] = display.verbosity
+
+ # give the module information about the ansible version
+ module_args['_ansible_version'] = __version__
+
+ # give the module information about its name
+ module_args['_ansible_module_name'] = module_name
+
+ # set the syslog facility to be used in the module
+ module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)
+
+ # let module know about filesystems that selinux treats specially
+ module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
+
+ # what to do when parameter values are converted to strings
+ module_args['_ansible_string_conversion_action'] = C.STRING_CONVERSION_ACTION
+
+ # give the module the socket for persistent connections
+ module_args['_ansible_socket'] = getattr(self._connection, 'socket_path')
+ if not module_args['_ansible_socket']:
+ module_args['_ansible_socket'] = task_vars.get('ansible_socket')
+
+ # make sure all commands use the designated shell executable
+ module_args['_ansible_shell_executable'] = self._play_context.executable
+
+ # make sure modules are aware if they need to keep the remote files
+ module_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES
+
+ # make sure all commands use the designated temporary directory if created
+ if self._is_become_unprivileged(): # force fallback on remote_tmp as user cannot normally write to dir
+ module_args['_ansible_tmpdir'] = None
+ else:
+ module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir
+
+ # make sure the remote_tmp value is sent through in case modules need to create their own
+ module_args['_ansible_remote_tmp'] = self.get_shell_option('remote_tmp', default='~/.ansible/tmp')
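+ # After this call module_args carries the internal '_ansible_*' keys, e.g.
+ # (illustrative values only): _ansible_check_mode=False, _ansible_no_log=False,
+ # _ansible_version='2.14.0', _ansible_tmpdir='/home/user/.ansible/tmp/...',
+ # _ansible_remote_tmp='~/.ansible/tmp'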
+
+ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=None, wrap_async=False):
+ '''
+ Transfer and run a module along with its arguments.
+ '''
+ if tmp is not None:
+ display.warning('_execute_module no longer honors the tmp parameter. Action plugins'
+ ' should set self._connection._shell.tmpdir to share the tmpdir')
+ del tmp # No longer used
+ if delete_remote_tmp is not None:
+ display.warning('_execute_module no longer honors the delete_remote_tmp parameter.'
+ ' Action plugins should check self._connection._shell.tmpdir to'
+ ' see if a tmpdir existed before they were called to determine'
+ ' if they are responsible for removing it.')
+ del delete_remote_tmp # No longer used
+
+ tmpdir = self._connection._shell.tmpdir
+
+ # We set the module_style to new here so the remote_tmp is created
+ # before the module args are built if remote_tmp is needed (async).
+ # If the module_style turns out to not be new and we didn't create the
+ # remote tmp here, it will still be created. This must be done before
+ # calling self._update_module_args() so the module wrapper has the
+ # correct remote_tmp value set
+ if not self._is_pipelining_enabled("new", wrap_async) and tmpdir is None:
+ self._make_tmp_path()
+ tmpdir = self._connection._shell.tmpdir
+
+ if task_vars is None:
+ task_vars = dict()
+
+ # if a module name was not specified for this execution, use the action from the task
+ if module_name is None:
+ module_name = self._task.action
+ if module_args is None:
+ module_args = self._task.args
+
+ self._update_module_args(module_name, module_args, task_vars)
+
+ remove_async_dir = None
+ if wrap_async or self._task.async_val:
+ async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+ remove_async_dir = len(self._task.environment)
+ self._task.environment.append({"ANSIBLE_ASYNC_DIR": async_dir})
+
+ # FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
+ (module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
+ display.vvv("Using module file %s" % module_path)
+ if not shebang and module_style != 'binary':
+ raise AnsibleError("module (%s) is missing interpreter line" % module_name)
+
+ self._used_interpreter = shebang
+ remote_module_path = None
+
+ if not self._is_pipelining_enabled(module_style, wrap_async):
+ # we might need remote tmp dir
+ if tmpdir is None:
+ self._make_tmp_path()
+ tmpdir = self._connection._shell.tmpdir
+
+ remote_module_filename = self._connection._shell.get_remote_filename(module_path)
+ remote_module_path = self._connection._shell.join_path(tmpdir, 'AnsiballZ_%s' % remote_module_filename)
+
+ args_file_path = None
+ if module_style in ('old', 'non_native_want_json', 'binary'):
+ # we'll also need a tmp file to hold our module arguments
+ args_file_path = self._connection._shell.join_path(tmpdir, 'args')
+
+ if remote_module_path or module_style != 'new':
+ display.debug("transferring module to remote %s" % remote_module_path)
+ if module_style == 'binary':
+ self._transfer_file(module_path, remote_module_path)
+ else:
+ self._transfer_data(remote_module_path, module_data)
+ if module_style == 'old':
+ # we need to dump the module args to a k=v string in a file on
+ # the remote system, which can be read and parsed by the module
+ args_data = ""
+ for k, v in module_args.items():
+ args_data += '%s=%s ' % (k, shlex.quote(text_type(v)))
+ self._transfer_data(args_file_path, args_data)
+ elif module_style in ('non_native_want_json', 'binary'):
+ self._transfer_data(args_file_path, json.dumps(module_args))
+ display.debug("done transferring module to remote")
+
+ environment_string = self._compute_environment_string()
+
+ # remove the ANSIBLE_ASYNC_DIR env entry if we added a temporary one for
+ # the async_wrapper task.
+ if remove_async_dir is not None:
+ del self._task.environment[remove_async_dir]
+
+ remote_files = []
+ if tmpdir and remote_module_path:
+ remote_files = [tmpdir, remote_module_path]
+
+ if args_file_path:
+ remote_files.append(args_file_path)
+
+ sudoable = True
+ in_data = None
+ cmd = ""
+
+ if wrap_async and not self._connection.always_pipeline_modules:
+ # configure, upload, and chmod the async_wrapper module
+ (async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(
+ module_name='ansible.legacy.async_wrapper', module_args=dict(), task_vars=task_vars)
+ async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
+ remote_async_module_path = self._connection._shell.join_path(tmpdir, async_module_remote_filename)
+ self._transfer_data(remote_async_module_path, async_module_data)
+ remote_files.append(remote_async_module_path)
+
+ async_limit = self._task.async_val
+ async_jid = str(random.randint(0, 999999999999))
+
+ # call the interpreter for async_wrapper directly
+ # this permits use of a script for an interpreter on non-Linux platforms
+ interpreter = shebang.replace('#!', '').strip()
+ async_cmd = [interpreter, remote_async_module_path, async_jid, async_limit, remote_module_path]
+
+ if environment_string:
+ async_cmd.insert(0, environment_string)
+
+ if args_file_path:
+ async_cmd.append(args_file_path)
+ else:
+ # maintain a fixed number of positional parameters for async_wrapper
+ async_cmd.append('_')
+
+ if not self._should_remove_tmp_path(tmpdir):
+ async_cmd.append("-preserve_tmp")
+
+ cmd = " ".join(to_text(x) for x in async_cmd)
+
+ else:
+
+ if self._is_pipelining_enabled(module_style):
+ in_data = module_data
+ display.vvv("Pipelining is enabled.")
+ else:
+ cmd = remote_module_path
+
+ cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path).strip()
+
+ # Fix permissions of the tmpdir path and tmpdir files. This should be called after all
+ # files have been transferred.
+ if remote_files:
+ # remove none/empty
+ remote_files = [x for x in remote_files if x]
+ self._fixup_perms2(remote_files, self._get_remote_user())
+
+ # actually execute
+ res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
+
+ # parse the main result
+ data = self._parse_returned_data(res)
+
+ # NOTE: INTERNAL KEYS ONLY ACCESSIBLE HERE
+ # get internal info before cleaning
+ if data.pop("_ansible_suppress_tmpdir_delete", False):
+ self._cleanup_remote_tmp = False
+
+ # NOTE: yum returns results .. but that made it 'compatible' with squashing, so we allow mappings, for now
+ if 'results' in data and (not isinstance(data['results'], Sequence) or isinstance(data['results'], string_types)):
+ data['ansible_module_results'] = data['results']
+ del data['results']
+ display.warning("Found internal 'results' key in module return, renamed to 'ansible_module_results'.")
+
+ # remove internal keys
+ remove_internal_keys(data)
+
+ if wrap_async:
+ # async_wrapper will clean up its tmpdir on its own so we want the controller side to
+ # forget about it now
+ self._connection._shell.tmpdir = None
+
+ # FIXME: for backwards compat, figure out if still makes sense
+ data['changed'] = True
+
+ # pre-split stdout/stderr into lines if needed
+ if 'stdout' in data and 'stdout_lines' not in data:
+ # if the value is 'False', a default won't catch it.
+ txt = data.get('stdout', None) or u''
+ data['stdout_lines'] = txt.splitlines()
+ if 'stderr' in data and 'stderr_lines' not in data:
+ # if the value is 'False', a default won't catch it.
+ txt = data.get('stderr', None) or u''
+ data['stderr_lines'] = txt.splitlines()
+
+ # propagate interpreter discovery results back to the controller
+ if self._discovered_interpreter_key:
+ if data.get('ansible_facts') is None:
+ data['ansible_facts'] = {}
+
+ data['ansible_facts'][self._discovered_interpreter_key] = self._discovered_interpreter
+
+ if self._discovery_warnings:
+ if data.get('warnings') is None:
+ data['warnings'] = []
+ data['warnings'].extend(self._discovery_warnings)
+
+ if self._discovery_deprecation_warnings:
+ if data.get('deprecations') is None:
+ data['deprecations'] = []
+ data['deprecations'].extend(self._discovery_deprecation_warnings)
+
+ # mark the entire module results untrusted as a template right here, since the current action could
+ # possibly template one of these values.
+ data = wrap_var(data)
+
+ display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
+ return data
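+ # Typical call from an action plugin (illustrative):
+ #   data = self._execute_module(module_name='ansible.legacy.ping',
+ #                               module_args={}, task_vars=task_vars)
+ #   changed = data.get('changed', False)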
+
+ def _parse_returned_data(self, res):
+ try:
+ filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''), objects_only=True)
+ for w in warnings:
+ display.warning(w)
+
+ data = json.loads(filtered_output)
+ data['_ansible_parsed'] = True
+ except ValueError:
+ # not valid json, lets try to capture error
+ data = dict(failed=True, _ansible_parsed=False)
+ data['module_stdout'] = res.get('stdout', u'')
+ if 'stderr' in res:
+ data['module_stderr'] = res['stderr']
+ if res['stderr'].startswith(u'Traceback'):
+ data['exception'] = res['stderr']
+
+ # in some cases a traceback will arrive on stdout instead of stderr, such as when using ssh with -tt
+ if 'exception' not in data and data['module_stdout'].startswith(u'Traceback'):
+ data['exception'] = data['module_stdout']
+
+ # The default
+ data['msg'] = "MODULE FAILURE"
+
+ # try to figure out if we are missing interpreter
+ if self._used_interpreter is not None:
+ interpreter = re.escape(self._used_interpreter.lstrip('!#'))
+ match = re.compile('%s: (?:No such file or directory|not found)' % interpreter)
+ if match.search(data['module_stderr']) or match.search(data['module_stdout']):
+ data['msg'] = "The module failed to execute correctly, you probably need to set the interpreter."
+
+ # always append hint
+ data['msg'] += '\nSee stdout/stderr for the exact error'
+
+ if 'rc' in res:
+ data['rc'] = res['rc']
+ return data
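+ # Illustrative: res = {'rc': 0, 'stdout': '{"ping": "pong", "changed": false}'}
+ # parses to {'ping': 'pong', 'changed': False, '_ansible_parsed': True, 'rc': 0};
+ # unparsable stdout instead yields failed=True and a MODULE FAILURE msg.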
+
+ # FIXME: move to connection base
+ def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='surrogate_then_replace', chdir=None):
+ '''
+ This is the function which executes the low level shell command, which
+ may be commands to create/remove directories for temporary files, or to
+ run the module code or python directly when pipelining.
+
+ :kwarg encoding_errors: If the value returned by the command isn't
+ utf-8 then we have to figure out how to transform it to unicode.
+ If the value is just going to be displayed to the user (or
+ discarded) then the default of 'replace' is fine. If the data is
+ used as a key or is going to be written back out to a file
+ verbatim, then this won't work. May have to use some sort of
+ replacement strategy (python3 could use surrogateescape)
+ :kwarg chdir: cd into this directory before executing the command.
+ '''
+
+ display.debug("_low_level_execute_command(): starting")
+ # if not cmd:
+ # # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
+ # display.debug("_low_level_execute_command(): no command, exiting")
+ # return dict(stdout='', stderr='', rc=254)
+
+ if chdir:
+ display.debug("_low_level_execute_command(): changing cwd to %s for this command" % chdir)
+ cmd = self._connection._shell.append_command('cd %s' % chdir, cmd)
+
+ # https://github.com/ansible/ansible/issues/68054
+ if executable:
+ self._connection._shell.executable = executable
+
+ ruser = self._get_remote_user()
+ buser = self.get_become_option('become_user')
+ if (sudoable and self._connection.become and # if sudoable and have become
+ resource_from_fqcr(self._connection.transport) != 'network_cli' and # if not using network_cli
+ (C.BECOME_ALLOW_SAME_USER or (buser != ruser or not any((ruser, buser))))): # if we allow same user PE or users are different and either is set
+ display.debug("_low_level_execute_command(): using become for this command")
+ cmd = self._connection.become.build_become_command(cmd, self._connection._shell)
+
+ if self._connection.allow_executable:
+ if executable is None:
+ executable = self._play_context.executable
+ # mitigation for SSH race which can drop stdout (https://github.com/ansible/ansible/issues/13876)
+ # only applied for the default executable to avoid interfering with the raw action
+ cmd = self._connection._shell.append_command(cmd, 'sleep 0')
+ if executable:
+ cmd = executable + ' -c ' + shlex.quote(cmd)
+
+ display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
+
+ # Change directory to basedir of task for command execution when connection is local
+ if self._connection.transport == 'local':
+ self._connection.cwd = to_bytes(self._loader.get_basedir(), errors='surrogate_or_strict')
+
+ rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ # stdout and stderr may be either a file-like or a bytes object.
+ # Convert either one to a text type
+ if isinstance(stdout, binary_type):
+ out = to_text(stdout, errors=encoding_errors)
+ elif not isinstance(stdout, text_type):
+ out = to_text(b''.join(stdout.readlines()), errors=encoding_errors)
+ else:
+ out = stdout
+
+ if isinstance(stderr, binary_type):
+ err = to_text(stderr, errors=encoding_errors)
+ elif not isinstance(stderr, text_type):
+ err = to_text(b''.join(stderr.readlines()), errors=encoding_errors)
+ else:
+ err = stderr
+
+ if rc is None:
+ rc = 0
+
+ # be sure to remove the BECOME-SUCCESS message now
+ out = self._strip_success_message(out)
+
+ display.debug(u"_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, out, err))
+ return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err, stderr_lines=err.splitlines())
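+ # Illustrative: self._low_level_execute_command('echo hi', sudoable=False) ->
+ # {'rc': 0, 'stdout': 'hi\n', 'stdout_lines': ['hi'], 'stderr': '', 'stderr_lines': []}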
+
+ def _get_diff_data(self, destination, source, task_vars, source_file=True):
+
+ # Note: Since we do not diff the source and destination before we transform from bytes into
+ # text the diff between source and destination may not be accurate. To fix this, we'd need
+ # to move the diffing from the callback plugins into here.
+ #
+ # Example of data which would cause trouble is src_content == b'\xff' and dest_content ==
+ # b'\xfe'. Neither of those are valid utf-8 so both get turned into the replacement
+ # character: diff['before'] = u'�' ; diff['after'] = u'�' When the callback plugin later
+ # diffs before and after it shows an empty diff.
+
+ diff = {}
+ display.debug("Going to peek to see if file has changed permissions")
+ peek_result = self._execute_module(
+ module_name='ansible.legacy.file', module_args=dict(path=destination, _diff_peek=True),
+ task_vars=task_vars, persist_files=True)
+
+ if peek_result.get('failed', False):
+ display.warning(u"Failed to get diff between '%s' and '%s': %s" % (os.path.basename(source), destination, to_text(peek_result.get(u'msg', u''))))
+ return diff
+
+ if peek_result.get('rc', 0) == 0:
+
+ if peek_result.get('state') in (None, 'absent'):
+ diff['before'] = u''
+ elif peek_result.get('appears_binary'):
+ diff['dst_binary'] = 1
+ elif peek_result.get('size') and C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
+ diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
+ else:
+ display.debug(u"Slurping the file %s" % source)
+ dest_result = self._execute_module(
+ module_name='ansible.legacy.slurp', module_args=dict(path=destination),
+ task_vars=task_vars, persist_files=True)
+ if 'content' in dest_result:
+ dest_contents = dest_result['content']
+ if dest_result['encoding'] == u'base64':
+ dest_contents = base64.b64decode(dest_contents)
+ else:
+ raise AnsibleError("unknown encoding in content option, failed: %s" % to_native(dest_result))
+ diff['before_header'] = destination
+ diff['before'] = to_text(dest_contents)
+
+ if source_file:
+ st = os.stat(source)
+ if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
+ diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
+ else:
+ display.debug("Reading local copy of the file %s" % source)
+ try:
+ with open(source, 'rb') as src:
+ src_contents = src.read()
+ except Exception as e:
+ raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, to_native(e)))
+
+ if b"\x00" in src_contents:
+ diff['src_binary'] = 1
+ else:
+ diff['after_header'] = source
+ diff['after'] = to_text(src_contents)
+ else:
+ display.debug(u"source of file passed in")
+ diff['after_header'] = u'dynamically generated'
+ diff['after'] = source
+
+ if self._task.no_log:
+ if 'before' in diff:
+ diff["before"] = u""
+ if 'after' in diff:
+ diff["after"] = u" [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]\n"
+
+ return diff
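+ # Illustrative return value when both sides are small text files:
+ #   {'before_header': dest, 'before': u'old\n', 'after_header': src, 'after': u'new\n'}
+ # the exact keys present depend on which branches above were taken.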
+
+ def _find_needle(self, dirname, needle):
+ '''
+ find a needle in a haystack of paths, optionally using 'dirname' as a subdir.
+ This will build the ordered list of paths to search and pass them to dwim
+ to get back the first existing file found.
+ '''
+
+ # dwim already deals with playbook basedirs
+ path_stack = self._task.get_search_path()
+
+ # if missing, this raises an AnsibleFileNotFound exception
+ return self._loader.path_dwim_relative_stack(path_stack, dirname, needle)
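+ # e.g. (hypothetical layout): self._find_needle('files', 'foo.conf') searches
+ # the task's search path (role files/ dirs, playbook dir, ...) and returns the
+ # first existing match, raising AnsibleFileNotFound when nothing matches.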
diff --git a/lib/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py
new file mode 100644
index 0000000..e569739
--- /dev/null
+++ b/lib/ansible/plugins/action/add_host.py
@@ -0,0 +1,98 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright 2012, Seth Vidal <skvidal@fedoraproject.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections.abc import Mapping
+
+from ansible.errors import AnsibleActionFail
+from ansible.module_utils.six import string_types
+from ansible.plugins.action import ActionBase
+from ansible.parsing.utils.addresses import parse_address
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+ ''' Create inventory hosts and groups in the in-memory inventory '''
+
+ # We need to be able to modify the inventory
+ BYPASS_HOST_LOOP = True
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ args = self._task.args
+ raw = args.pop('_raw_params', {})
+ if isinstance(raw, Mapping):
+ # TODO: create 'conflict' detection in base class to deal with repeats and aliases and warn user
+ args = combine_vars(raw, args)
+ else:
+ raise AnsibleActionFail('Invalid raw parameters passed, requires a dictionary/mapping, got a %s' % type(raw))
+
+ # Parse out any hostname:port patterns
+ new_name = args.get('name', args.get('hostname', args.get('host', None)))
+ if new_name is None:
+ raise AnsibleActionFail('name, host or hostname needs to be provided')
+
+ display.vv("creating host via 'add_host': hostname=%s" % new_name)
+
+ try:
+ name, port = parse_address(new_name, allow_ranges=False)
+ except Exception:
+ # not a parsable hostname, but might still be usable
+ name = new_name
+ port = None
+
+ if port:
+ args['ansible_ssh_port'] = port
+
+ groups = args.get('groupname', args.get('groups', args.get('group', '')))
+ # add it to the group if that was specified
+ new_groups = []
+ if groups:
+ if isinstance(groups, list):
+ group_list = groups
+ elif isinstance(groups, string_types):
+ group_list = groups.split(",")
+ else:
+ raise AnsibleActionFail("Groups must be specified as a list.", obj=self._task)
+
+ for group_name in group_list:
+ group_name = group_name.strip()
+ if group_name not in new_groups:
+ new_groups.append(group_name)
+
+ # Add any variables to the new_host
+ host_vars = dict()
+ special_args = frozenset(('name', 'hostname', 'host', 'groupname', 'groups', 'group'))  # all aliases accepted above
+ for k in args.keys():
+ if k not in special_args:
+ host_vars[k] = args[k]
+
+ result['changed'] = False
+ result['add_host'] = dict(host_name=name, groups=new_groups, host_vars=host_vars)
+ return result
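+# Illustrative task using this action (values hypothetical):
+#   - add_host:
+#       name: web1.example.com:2222
+#       groups: webservers,staging
+#       ansible_user: deploy
+# adds host 'web1.example.com' with ansible_ssh_port=2222 to groups
+# ['webservers', 'staging'] with host var ansible_user='deploy'.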
diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py
new file mode 100644
index 0000000..06fa2df
--- /dev/null
+++ b/lib/ansible/plugins/action/assemble.py
@@ -0,0 +1,166 @@
+# (c) 2013-2016, Michael DeHaan <michael.dehaan@gmail.com>
+# Stephen Fromm <sfromm@gmail.com>
+# Brian Coca <briancoca+dev@gmail.com>
+# Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import codecs
+import os
+import os.path
+import re
+import tempfile
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.hashing import checksum_s
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, decrypt=True):
+ ''' assemble a file from a directory of fragments '''
+
+ tmpfd, temp_path = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
+ tmp = os.fdopen(tmpfd, 'wb')
+ delimit_me = False
+ add_newline = False
+
+ for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))):
+ if compiled_regexp and not compiled_regexp.search(f):
+ continue
+ fragment = u"%s/%s" % (src_path, f)
+ if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
+ continue
+
+ with open(self._loader.get_real_file(fragment, decrypt=decrypt), 'rb') as fragment_fh:
+ fragment_content = fragment_fh.read()
+
+ # always put a newline between fragments if the previous fragment didn't end with a newline.
+ if add_newline:
+ tmp.write(b'\n')
+
+ # delimiters should only appear between fragments
+ if delimit_me:
+ if delimiter:
+ # un-escape anything like newlines
+ delimiter = codecs.escape_decode(delimiter)[0]
+ tmp.write(delimiter)
+ # always make sure there's a newline after the
+ # delimiter, so lines don't run together
+ if delimiter[-1:] != b'\n':  # slice, not index: indexing bytes yields an int on Python 3
+ tmp.write(b'\n')
+
+ tmp.write(fragment_content)
+ delimit_me = True
+ if fragment_content.endswith(b'\n'):
+ add_newline = False
+ else:
+ add_newline = True
+
+ tmp.close()
+ return temp_path
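+ # Illustrative: fragments '00-base' and '10-extra' in src_path are written to
+ # the temp file in sorted order, separated by the (escape-decoded) delimiter
+ # when one is given, with newlines inserted so fragments never run together.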
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = False
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if task_vars is None:
+ task_vars = dict()
+
+ src = self._task.args.get('src', None)
+ dest = self._task.args.get('dest', None)
+ delimiter = self._task.args.get('delimiter', None)
+ remote_src = self._task.args.get('remote_src', 'yes')
+ regexp = self._task.args.get('regexp', None)
+ follow = self._task.args.get('follow', False)
+ ignore_hidden = self._task.args.get('ignore_hidden', False)
+ decrypt = self._task.args.pop('decrypt', True)
+
+ try:
+ if src is None or dest is None:
+ raise AnsibleActionFail("src and dest are required")
+
+ if boolean(remote_src, strict=False):
+ # call assemble via ansible.legacy to allow library/ overrides of the module without collection search
+ result.update(self._execute_module(module_name='ansible.legacy.assemble', task_vars=task_vars))
+ raise _AnsibleActionDone()
+ else:
+ try:
+ src = self._find_needle('files', src)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ if not os.path.isdir(src):
+ raise AnsibleActionFail(u"Source (%s) is not a directory" % src)
+
+ _re = None
+ if regexp is not None:
+ _re = re.compile(regexp)
+
+ # Does all work assembling the file
+ path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt)
+
+ path_checksum = checksum_s(path)
+ dest = self._remote_expand_user(dest)
+ dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow)
+
+ diff = {}
+
+ # setup args for running modules
+ new_module_args = self._task.args.copy()
+
+ # clean assemble specific options
+ for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']:
+ if opt in new_module_args:
+ del new_module_args[opt]
+ new_module_args['dest'] = dest
+
+ if path_checksum != dest_stat['checksum']:
+
+ if self._play_context.diff:
+ diff = self._get_diff_data(dest, path, task_vars)
+
+ remote_path = self._connection._shell.join_path(self._connection._shell.tmpdir, 'src')
+ xfered = self._transfer_file(path, remote_path)
+
+ # fix file permissions when the copy is done as a different user
+ self._fixup_perms2((self._connection._shell.tmpdir, remote_path))
+
+ new_module_args.update(dict(src=xfered,))
+
+ res = self._execute_module(module_name='ansible.legacy.copy', module_args=new_module_args, task_vars=task_vars)
+ if diff:
+ res['diff'] = diff
+ result.update(res)
+ else:
+ result.update(self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars))
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
diff --git a/lib/ansible/plugins/action/assert.py b/lib/ansible/plugins/action/assert.py
new file mode 100644
index 0000000..7721a6b
--- /dev/null
+++ b/lib/ansible/plugins/action/assert.py
@@ -0,0 +1,94 @@
+# Copyright 2012, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.playbook.conditional import Conditional
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+
+
+class ActionModule(ActionBase):
+ ''' Assert given expressions are true, failing with a custom message '''
+
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('fail_msg', 'msg', 'quiet', 'success_msg', 'that'))
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if 'that' not in self._task.args:
+ raise AnsibleError('conditional required in "that" string')
+
+ fail_msg = None
+ success_msg = None
+
+ fail_msg = self._task.args.get('fail_msg', self._task.args.get('msg'))
+ if fail_msg is None:
+ fail_msg = 'Assertion failed'
+ elif isinstance(fail_msg, list):
+ if not all(isinstance(x, string_types) for x in fail_msg):
+ raise AnsibleError('One of the elements in the fail_msg or msg list is not a string')
+ elif not isinstance(fail_msg, (string_types, list)):
+ raise AnsibleError('Incorrect type for fail_msg or msg, expected a string or list and got %s' % type(fail_msg))
+
+ success_msg = self._task.args.get('success_msg')
+ if success_msg is None:
+ success_msg = 'All assertions passed'
+ elif isinstance(success_msg, list):
+ if not all(isinstance(x, string_types) for x in success_msg):
+ raise AnsibleError('One of the elements in the success_msg list is not a string')
+ elif not isinstance(success_msg, (string_types, list)):
+ raise AnsibleError('Incorrect type for success_msg, expected a string or list and got %s' % type(success_msg))
+
+ quiet = boolean(self._task.args.get('quiet', False), strict=False)
+
+ # make sure the 'that' items are a list
+ thats = self._task.args['that']
+ if not isinstance(thats, list):
+ thats = [thats]
+
+ # Now we iterate over the that items, temporarily assigning them
+ # to the task's when value so we can evaluate the conditional using
+ # the built in evaluate function. The when has already been evaluated
+ # by this point, and is not used again, so we don't care about mangling
+ # that value now
+ cond = Conditional(loader=self._loader)
+ if not quiet:
+ result['_ansible_verbose_always'] = True
+
+ for that in thats:
+ cond.when = [that]
+ test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
+ if not test_result:
+ result['failed'] = True
+ result['evaluated_to'] = test_result
+ result['assertion'] = that
+
+ result['msg'] = fail_msg
+
+ return result
+
+ result['changed'] = False
+ result['msg'] = success_msg
+ return result
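+# Illustrative task (values hypothetical):
+#   - assert:
+#       that:
+#         - ansible_facts['os_family'] == 'Debian'
+#       fail_msg: unsupported OS family
+#       quiet: true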
diff --git a/lib/ansible/plugins/action/async_status.py b/lib/ansible/plugins/action/async_status.py
new file mode 100644
index 0000000..ad839f1
--- /dev/null
+++ b/lib/ansible/plugins/action/async_status.py
@@ -0,0 +1,53 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleActionFail
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
+
+class ActionModule(ActionBase):
+
+ def _get_async_dir(self):
+
+ # async directory based on the shell option
+ async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+
+ return self._remote_expand_user(async_dir)
+
+ def run(self, tmp=None, task_vars=None):
+
+ results = super(ActionModule, self).run(tmp, task_vars)
+
+ validation_result, new_module_args = self.validate_argument_spec(
+ argument_spec={
+ 'jid': {'type': 'str', 'required': True},
+ 'mode': {'type': 'str', 'choices': ['status', 'cleanup'], 'default': 'status'},
+ },
+ )
+
+ # initialize response
+ results['started'] = results['finished'] = 0
+ results['stdout'] = results['stderr'] = ''
+ results['stdout_lines'] = results['stderr_lines'] = []
+
+ jid = new_module_args["jid"]
+ mode = new_module_args["mode"]
+
+ results['ansible_job_id'] = jid
+ async_dir = self._get_async_dir()
+ log_path = self._connection._shell.join_path(async_dir, jid)
+
+ if mode == 'cleanup':
+ results['erased'] = log_path
+ else:
+ results['results_file'] = log_path
+ results['started'] = 1
+
+ new_module_args['_async_dir'] = async_dir
+ results = merge_hash(results, self._execute_module(module_name='ansible.legacy.async_status', task_vars=task_vars, module_args=new_module_args))
+
+ return results
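+# Illustrative polling task (jid hypothetical):
+#   - async_status:
+#       jid: "351234404640.7602"
+#     register: job
+#     until: job.finished
+#     retries: 30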
diff --git a/lib/ansible/plugins/action/command.py b/lib/ansible/plugins/action/command.py
new file mode 100644
index 0000000..82a85dc
--- /dev/null
+++ b/lib/ansible/plugins/action/command.py
@@ -0,0 +1,28 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_async = True
+ results = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+ # explicitly call `ansible.legacy.command` for backcompat to allow library/ override of `command` while not allowing
+ # collections search for an unqualified `command` module
+ results = merge_hash(results, self._execute_module(module_name='ansible.legacy.command', task_vars=task_vars, wrap_async=wrap_async))
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return results
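+# Illustrative: for a `command` task with async set on a connection lacking
+# native async support, wrap_async is True and the module runs under the async
+# wrapper; otherwise the temporary path is removed right after execution.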
diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py
new file mode 100644
index 0000000..cb3d15b
--- /dev/null
+++ b/lib/ansible/plugins/action/copy.py
@@ -0,0 +1,599 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import os.path
+import stat
+import tempfile
+import traceback
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.hashing import checksum
+
+
+# Supplement the FILE_COMMON_ARGUMENTS with arguments that are specific to file
+REAL_FILE_ARGS = frozenset(FILE_COMMON_ARGUMENTS.keys()).union(
+ ('state', 'path', '_original_basename', 'recurse', 'force',
+ '_diff_peek', 'src'))
+
+
+def _create_remote_file_args(module_args):
+ """remove keys that are not relevant to file"""
+ return dict((k, v) for k, v in module_args.items() if k in REAL_FILE_ARGS)
+
+
+def _create_remote_copy_args(module_args):
+ """remove action plugin only keys"""
+ return dict((k, v) for k, v in module_args.items() if k not in ('content', 'decrypt'))
+
+
+def _walk_dirs(topdir, base_path=None, local_follow=False, trailing_slash_detector=None):
+ """
+ Walk a filesystem tree returning enough information to copy the files
+
+ :arg topdir: The directory that the filesystem tree is rooted at
+ :kwarg base_path: The initial directory structure to strip off of the
+ files for the destination directory. If this is None (the default),
+ the base_path is set to ``top_dir``.
+ :kwarg local_follow: Whether to follow symlinks on the source. When set
+ to False, no symlinks are dereferenced. When set to True (the
+ default), the code will dereference most symlinks. However, symlinks
+ can still be present if needed to break a circular link.
+ :kwarg trailing_slash_detector: Function to determine if a path has
+ a trailing directory separator. Only needed when dealing with paths on
+ a remote machine (in which case, pass in a function that is aware of the
+ directory separator conventions on the remote machine).
+ :returns: dictionary of tuples. All of the path elements in the structure are text strings.
+ This separates all the files, directories, and symlinks along with
+ important information about each::
+
+ { 'files': [('/absolute/path/to/copy/from', 'relative/path/to/copy/to'), ...],
+ 'directories': [('/absolute/path/to/copy/from', 'relative/path/to/copy/to'), ...],
+ 'symlinks': [('/symlink/target/path', 'relative/path/to/copy/to'), ...],
+ }
+
+ The ``symlinks`` field is only populated if ``local_follow`` is set to False
+ *or* a circular symlink cannot be dereferenced.
+
+ """
+ # Convert the path segments into byte strings
+
+ r_files = {'files': [], 'directories': [], 'symlinks': []}
+
+ def _recurse(topdir, rel_offset, parent_dirs, rel_base=u''):
+ """
+ This is a closure (a function utilizing variables from its parent
+ function's scope) so that we only need one copy of all the containers.
+ Note that this function uses side effects (See the Variables used from
+ outer scope).
+
+ :arg topdir: The directory we are walking for files
+ :arg rel_offset: Integer defining how many characters to strip off of
+ the beginning of a path
+ :arg parent_dirs: Directories that we're copying that this directory is in.
+ :kwarg rel_base: String to prepend to the path after ``rel_offset`` is
+ applied to form the relative path.
+
+ Variables used from the outer scope
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :r_files: Dictionary of files in the hierarchy. See the return value
+ for :func:`walk` for the structure of this dictionary.
+ :local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
+ """
+ for base_path, sub_folders, files in os.walk(topdir):
+ for filename in files:
+ filepath = os.path.join(base_path, filename)
+ dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
+
+ if os.path.islink(filepath):
+ # Dereference the symlink
+ real_file = os.path.realpath(filepath)
+ if local_follow and os.path.isfile(real_file):
+ # Add the file pointed to by the symlink
+ r_files['files'].append((real_file, dest_filepath))
+ else:
+ # Mark this file as a symlink to copy
+ r_files['symlinks'].append((os.readlink(filepath), dest_filepath))
+ else:
+ # Just a normal file
+ r_files['files'].append((filepath, dest_filepath))
+
+ for dirname in sub_folders:
+ dirpath = os.path.join(base_path, dirname)
+ dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
+ real_dir = os.path.realpath(dirpath)
+ dir_stats = os.stat(real_dir)
+
+ if os.path.islink(dirpath):
+ if local_follow:
+ if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
+ # Just insert the symlink if the target directory
+ # exists inside of the copy already
+ r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
+ else:
+ # Walk the dirpath to find all parent directories.
+ new_parents = set()
+ parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
+ for parent in range(len(parent_dir_list), 0, -1):
+ parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
+ if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
+ # Reached the point at which the directory
+ # tree is already known. Don't add any
+ # more or we might go to an ancestor that
+ # isn't being copied.
+ break
+ new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
+
+ if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
+ # This was a circular symlink, so add it
+ # as a symlink
+ r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
+ else:
+ # Walk the directory pointed to by the symlink
+ r_files['directories'].append((real_dir, dest_dirpath))
+ offset = len(real_dir) + 1
+ _recurse(real_dir, offset, parent_dirs.union(new_parents), rel_base=dest_dirpath)
+ else:
+ # Add the symlink to the destination
+ r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
+ else:
+ # Just a normal directory
+ r_files['directories'].append((dirpath, dest_dirpath))
+
+ # Check if the source ends with a "/" so that we know which directory
+ # level to work at (similar to rsync)
+ source_trailing_slash = False
+ if trailing_slash_detector:
+ source_trailing_slash = trailing_slash_detector(topdir)
+ else:
+ source_trailing_slash = topdir.endswith(os.path.sep)
+
+ # Calculate the offset needed to strip the base_path to make relative
+ # paths
+ if base_path is None:
+ base_path = topdir
+ if not source_trailing_slash:
+ base_path = os.path.dirname(base_path)
+ if topdir.startswith(base_path):
+ offset = len(base_path)
+
+ # Make sure we're making the new paths relative
+ if trailing_slash_detector and not trailing_slash_detector(base_path):
+ offset += 1
+ elif not base_path.endswith(os.path.sep):
+ offset += 1
+
+ if os.path.islink(topdir) and not local_follow:
+ r_files['symlinks'] = [(os.readlink(topdir), os.path.basename(topdir))]  # list of tuples, matching the docstring
+ return r_files
+
+ dir_stats = os.stat(topdir)
+ parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
+ # Actually walk the directory hierarchy
+ _recurse(topdir, offset, parents)
+
+ return r_files
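+# Illustrative (hypothetical tree): _walk_dirs('/src/app/', local_follow=True)
+# might return {'files': [('/src/app/a.txt', u'a.txt')], 'directories': [],
+# 'symlinks': []}, with destination paths made relative per the
+# trailing-slash rules above.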
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def _ensure_invocation(self, result):
+ # NOTE: adding invocation arguments here needs to be kept in sync with
+ # any no_log specified in the argument_spec in the module.
+ # This is not automatic.
+ # NOTE: do not add to this. This should be made a generic function for action plugins.
+ # This should also use the same argspec as the module instead of keeping it in sync.
+ if 'invocation' not in result:
+ if self._play_context.no_log:
+ result['invocation'] = "CENSORED: no_log is set"
+ else:
+ # NOTE: Should be removed in the future. For now keep this broken
+ # behaviour; see PR 51582 for background
+ result['invocation'] = self._task.args.copy()
+ result['invocation']['module_args'] = self._task.args.copy()
+
+ if isinstance(result['invocation'], dict):
+ if 'content' in result['invocation']:
+ result['invocation']['content'] = 'CENSORED: content is a no_log parameter'
+ if result['invocation'].get('module_args', {}).get('content') is not None:
+ result['invocation']['module_args']['content'] = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+
+ return result
+
+ def _copy_file(self, source_full, source_rel, content, content_tempfile,
+ dest, task_vars, follow):
+ decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
+ force = boolean(self._task.args.get('force', 'yes'), strict=False)
+ raw = boolean(self._task.args.get('raw', 'no'), strict=False)
+
+ result = {}
+ result['diff'] = []
+
+ # If the local file does not exist, get_real_file() raises AnsibleFileNotFound
+ try:
+ source_full = self._loader.get_real_file(source_full, decrypt=decrypt)
+ except AnsibleFileNotFound as e:
+ result['failed'] = True
+ result['msg'] = "could not find src=%s, %s" % (source_full, to_text(e))
+ return result
+
+ # Get the local mode and set if user wanted it preserved
+ # https://github.com/ansible/ansible-modules-core/issues/1124
+ lmode = None
+ if self._task.args.get('mode', None) == 'preserve':
+ lmode = '0%03o' % stat.S_IMODE(os.stat(source_full).st_mode)
+
+ # This is a kind of optimization - if the user told us the destination
+ # is a dir, do the path manipulation right away; otherwise we still
+ # check for dest being a dir via the remote call below.
+ if self._connection._shell.path_has_trailing_slash(dest):
+ dest_file = self._connection._shell.join_path(dest, source_rel)
+ else:
+ dest_file = dest
+
+ # Attempt to get remote file info
+ dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, checksum=force)
+
+ if dest_status['exists'] and dest_status['isdir']:
+ # The dest is a directory.
+ if content is not None:
+ # If source was defined as content remove the temporary file and fail out.
+ self._remove_tempfile_if_content_defined(content, content_tempfile)
+ result['failed'] = True
+ result['msg'] = "can not use content with a dir as dest"
+ return result
+ else:
+ # Append the relative source location to the destination and get remote stats again
+ dest_file = self._connection._shell.join_path(dest, source_rel)
+ dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, checksum=force)
+
+ if dest_status['exists'] and not force:
+ # remote_file exists so continue to next iteration.
+ return None
+
+ # Generate a hash of the local file.
+ local_checksum = checksum(source_full)
+
+ if local_checksum != dest_status['checksum']:
+ # The checksums don't match and we will change or error out.
+
+ if self._play_context.diff and not raw:
+ result['diff'].append(self._get_diff_data(dest_file, source_full, task_vars))
+
+ if self._play_context.check_mode:
+ self._remove_tempfile_if_content_defined(content, content_tempfile)
+ result['changed'] = True
+ return result
+
+ # Define a remote directory that we will copy the file to.
+ tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source')
+
+ remote_path = None
+
+ if not raw:
+ remote_path = self._transfer_file(source_full, tmp_src)
+ else:
+ self._transfer_file(source_full, dest_file)
+
+ # We have copied the file remotely and no longer require our content_tempfile
+ self._remove_tempfile_if_content_defined(content, content_tempfile)
+ self._loader.cleanup_tmp_file(source_full)
+
+ # FIXME: I don't think this is needed when PIPELINING=0 because the source is created
+ # world readable. Access to the directory itself is controlled via fixup_perms2() as
+ # part of executing the module. Check that umask with scp/sftp/piped doesn't cause
+ # a problem before acting on this idea. (This idea would save a round-trip)
+ # fix file permissions when the copy is done as a different user
+ if remote_path:
+ self._fixup_perms2((self._connection._shell.tmpdir, remote_path))
+
+ if raw:
+ # Continue to next iteration if raw is defined.
+ return None
+
+ # Run the copy module
+
+ # src and dest here come after original and override them
+ # we pass dest only to make sure it includes trailing slash in case of recursive copy
+ new_module_args = _create_remote_copy_args(self._task.args)
+ new_module_args.update(
+ dict(
+ src=tmp_src,
+ dest=dest,
+ _original_basename=source_rel,
+ follow=follow
+ )
+ )
+ if not self._task.args.get('checksum'):
+ new_module_args['checksum'] = local_checksum
+
+ if lmode:
+ new_module_args['mode'] = lmode
+
+ module_return = self._execute_module(module_name='ansible.legacy.copy', module_args=new_module_args, task_vars=task_vars)
+
+ else:
+ # no need to transfer the file, already correct hash, but still need to call
+ # the file module in case we want to change attributes
+ self._remove_tempfile_if_content_defined(content, content_tempfile)
+ self._loader.cleanup_tmp_file(source_full)
+
+ if raw:
+ return None
+
+ # Fix for https://github.com/ansible/ansible-modules-core/issues/1568.
+ # If checksums match, and follow = True, find out if 'dest' is a link. If so,
+ # change it to point to the source of the link.
+ if follow:
+ dest_status_nofollow = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=False)
+ if dest_status_nofollow['islnk'] and 'lnk_source' in dest_status_nofollow:
+ dest = dest_status_nofollow['lnk_source']
+
+ # Build temporary module_args.
+ new_module_args = _create_remote_file_args(self._task.args)
+ new_module_args.update(
+ dict(
+ dest=dest,
+ _original_basename=source_rel,
+ recurse=False,
+ state='file',
+ )
+ )
+ # src is sent to the file module in _original_basename, not in src
+ try:
+ del new_module_args['src']
+ except KeyError:
+ pass
+
+ if lmode:
+ new_module_args['mode'] = lmode
+
+ # Execute the file module.
+ module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)
+
+ if not module_return.get('checksum'):
+ module_return['checksum'] = local_checksum
+
+ result.update(module_return)
+ return result
+
+ def _create_content_tempfile(self, content):
+ ''' Create a tempfile containing defined content '''
+ fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
+ f = os.fdopen(fd, 'wb')
+ content = to_bytes(content)
+ try:
+ f.write(content)
+ except Exception as err:
+ os.remove(content_tempfile)
+ raise Exception(err)
+ finally:
+ f.close()
+ return content_tempfile
+
+ def _remove_tempfile_if_content_defined(self, content, content_tempfile):
+ if content is not None:
+ os.remove(content_tempfile)
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for file transfer operations '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ source = self._task.args.get('src', None)
+ content = self._task.args.get('content', None)
+ dest = self._task.args.get('dest', None)
+ remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
+ local_follow = boolean(self._task.args.get('local_follow', True), strict=False)
+
+ result['failed'] = True
+ if not source and content is None:
+ result['msg'] = 'src (or content) is required'
+ elif not dest:
+ result['msg'] = 'dest is required'
+ elif source and content is not None:
+ result['msg'] = 'src and content are mutually exclusive'
+ elif content is not None and dest is not None and dest.endswith("/"):
+ result['msg'] = "can not use content with a dir as dest"
+ else:
+ del result['failed']
+
+ if result.get('failed'):
+ return self._ensure_invocation(result)
+
+ # Define content_tempfile up front so later cleanup can always reference it.
+ content_tempfile = None
+
+ # If content is defined make a tmp file and write the content into it.
+ if content is not None:
+ try:
+ # If content comes to us as a dict it should be decoded json.
+ # We need to encode it back into a string to write it out.
+ if isinstance(content, dict) or isinstance(content, list):
+ content_tempfile = self._create_content_tempfile(json.dumps(content))
+ else:
+ content_tempfile = self._create_content_tempfile(content)
+ source = content_tempfile
+ except Exception as err:
+ result['failed'] = True
+ result['msg'] = "could not write content temp file: %s" % to_native(err)
+ return self._ensure_invocation(result)
+
+ # if remote_src is set there is nothing to transfer from the controller;
+ # run the copy module remotely and let it resolve the source there
+ elif remote_src:
+ result.update(self._execute_module(module_name='ansible.legacy.copy', task_vars=task_vars))
+ return self._ensure_invocation(result)
+ else:
+ # find_needle returns a path that may not have a trailing slash on
+ # a directory so we need to determine that now (we use it just
+ # like rsync does to figure out whether to include the directory
+ # or only the files inside the directory)
+ trailing_slash = source.endswith(os.path.sep)
+ try:
+ # find in expected paths
+ source = self._find_needle('files', source)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return self._ensure_invocation(result)
+
+ if trailing_slash != source.endswith(os.path.sep):
+ if source[-1] == os.path.sep:
+ source = source[:-1]
+ else:
+ source = source + os.path.sep
+
+ # Lists of source file tuples (full_path, relative_path) that we will try to copy to the destination
+ source_files = {'files': [], 'directories': [], 'symlinks': []}
+
+ # If source is a directory populate our list else source is a file and translate it to a tuple.
+ if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
+ # Get a list of the files we want to replicate on the remote side
+ source_files = _walk_dirs(source, local_follow=local_follow,
+ trailing_slash_detector=self._connection._shell.path_has_trailing_slash)
+
+ # If it's a recursive copy, the destination is always a dir;
+ # explicitly mark it so (note - the copy module relies on this).
+ if not self._connection._shell.path_has_trailing_slash(dest):
+ dest = self._connection._shell.join_path(dest, '')
+ # FIXME: Can we optimize cases where there's only one file, no
+ # symlinks and any number of directories? In the original code,
+ # empty directories are not copied....
+ else:
+ source_files['files'] = [(source, os.path.basename(source))]
+
+ changed = False
+ module_return = dict(changed=False)
+
+ # Track whether we executed a module, used to cut down
+ # on command calls when not recursive.
+ module_executed = False
+
+ # expand any user home dir specifier
+ dest = self._remote_expand_user(dest)
+
+ implicit_directories = set()
+ for source_full, source_rel in source_files['files']:
+ # copy files over. This happens first as directories that have
+ # a file do not need to be created later
+
+ # We only follow symlinks for files in the non-recursive case
+ if source_files['directories']:
+ follow = False
+ else:
+ follow = boolean(self._task.args.get('follow', False), strict=False)
+
+ module_return = self._copy_file(source_full, source_rel, content, content_tempfile, dest, task_vars, follow)
+ if module_return is None:
+ continue
+
+ if module_return.get('failed'):
+ result.update(module_return)
+ return self._ensure_invocation(result)
+
+ paths = os.path.split(source_rel)
+ dir_path = ''
+ for dir_component in paths:
+ # accumulate the joined path so each parent directory is recorded
+ dir_path = os.path.join(dir_path, dir_component)
+ implicit_directories.add(dir_path)
+ if 'diff' in result and not result['diff']:
+ del result['diff']
+ module_executed = True
+ changed = changed or module_return.get('changed', False)
+
+ for src, dest_path in source_files['directories']:
+ # Find directories that are leaves as they might not have been
+ # created yet.
+ if dest_path in implicit_directories:
+ continue
+
+ # Use file module to create these
+ new_module_args = _create_remote_file_args(self._task.args)
+ new_module_args['path'] = os.path.join(dest, dest_path)
+ new_module_args['state'] = 'directory'
+ new_module_args['mode'] = self._task.args.get('directory_mode', None)
+ new_module_args['recurse'] = False
+ del new_module_args['src']
+
+ module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)
+
+ if module_return.get('failed'):
+ result.update(module_return)
+ return self._ensure_invocation(result)
+
+ module_executed = True
+ changed = changed or module_return.get('changed', False)
+
+ for target_path, dest_path in source_files['symlinks']:
+ # Copy symlinks over
+ new_module_args = _create_remote_file_args(self._task.args)
+ new_module_args['path'] = os.path.join(dest, dest_path)
+ new_module_args['src'] = target_path
+ new_module_args['state'] = 'link'
+ new_module_args['force'] = True
+
+ # Only follow remote symlinks in the non-recursive case
+ if source_files['directories']:
+ new_module_args['follow'] = False
+
+ # the file module cannot deal with 'preserve' mode, and mode is
+ # meaningless for symlinks anyway, so just don't pass it.
+ if new_module_args.get('mode', None) == 'preserve':
+ new_module_args.pop('mode')
+
+ module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)
+ module_executed = True
+
+ if module_return.get('failed'):
+ result.update(module_return)
+ return self._ensure_invocation(result)
+
+ changed = changed or module_return.get('changed', False)
+
+ if module_executed and len(source_files['files']) == 1:
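+ # for a single file, surface the module's own result (checksum, diff, etc.) directly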
+ result.update(module_return)
+
+ # the file module returns the file path as 'path', but
+ # the copy module uses 'dest', so add it if it's not there
+ if 'path' in result and 'dest' not in result:
+ result['dest'] = result['path']
+ else:
+ result.update(dict(dest=dest, src=source, changed=changed))
+
+ # Delete tmp path
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return self._ensure_invocation(result)
diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py
new file mode 100644
index 0000000..2584fd3
--- /dev/null
+++ b/lib/ansible/plugins/action/debug.py
@@ -0,0 +1,80 @@
+# Copyright 2012, Dag Wieers <dag@wieers.com>
+# Copyright 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleUndefinedVariable
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+ ''' Print statements during execution '''
+
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('msg', 'var', 'verbosity'))
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ if 'msg' in self._task.args and 'var' in self._task.args:
+ return {"failed": True, "msg": "'msg' and 'var' are incompatible options"}
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ # get task verbosity
+ verbosity = int(self._task.args.get('verbosity', 0))
+
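+ # only produce output when the task's verbosity threshold is at or below the current display verbosity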
+ if verbosity <= self._display.verbosity:
+ if 'msg' in self._task.args:
+ result['msg'] = self._task.args['msg']
+
+ elif 'var' in self._task.args:
+ try:
+ results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True)
+ if results == self._task.args['var']:
+ # if results is not str/unicode type, raise an exception
+ if not isinstance(results, string_types):
+ raise AnsibleUndefinedVariable
+ # If var name is same as result, try to template it
+ results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True)
+ except AnsibleUndefinedVariable as e:
+ results = u"VARIABLE IS NOT DEFINED!"
+ if self._display.verbosity > 0:
+ results += u": %s" % to_text(e)
+
+ if isinstance(self._task.args['var'], (list, dict)):
+ # If var is a list or dict, use the type as key to display
+ result[to_text(type(self._task.args['var']))] = results
+ else:
+ result[self._task.args['var']] = results
+ else:
+ result['msg'] = 'Hello world!'
+
+ # force flag so debug output is always shown in full, regardless of verbosity
+ result['_ansible_verbose_always'] = True
+ else:
+ result['skipped_reason'] = "Verbosity threshold not met."
+ result['skipped'] = True
+
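+ # debug itself never fails, so force the flag off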
+ result['failed'] = False
+
+ return result
diff --git a/lib/ansible/plugins/action/fail.py b/lib/ansible/plugins/action/fail.py
new file mode 100644
index 0000000..8d3450c
--- /dev/null
+++ b/lib/ansible/plugins/action/fail.py
@@ -0,0 +1,43 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2012, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+ ''' Fail with custom message '''
+
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('msg',))
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ msg = 'Failed as requested from task'
+ if self._task.args and 'msg' in self._task.args:
+ msg = self._task.args.get('msg')
+
+ result['failed'] = True
+ result['msg'] = msg
+ return result
diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py
new file mode 100644
index 0000000..992ba5a
--- /dev/null
+++ b/lib/ansible/plugins/action/fetch.py
@@ -0,0 +1,207 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import base64
+from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleActionSkip
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
+from ansible.utils.path import makedirs_safe, is_subpath
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for fetch operations '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ try:
+ if self._play_context.check_mode:
+ raise AnsibleActionSkip('check mode not (yet) supported for this module')
+
+ source = self._task.args.get('src', None)
+ original_dest = dest = self._task.args.get('dest', None)
+ flat = boolean(self._task.args.get('flat'), strict=False)
+ fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False)
+ validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False)
+
+ msg = ''
+ # validate source and dest are strings FIXME: use basic.py and module specs
+ if not isinstance(source, string_types):
+ msg = "Invalid type supplied for source option, it must be a string"
+
+ if not isinstance(dest, string_types):
+ msg = "Invalid type supplied for dest option, it must be a string"
+
+ if source is None or dest is None:
+ msg = "src and dest are required"
+
+ if msg:
+ raise AnsibleActionFail(msg)
+
+ source = self._connection._shell.join_path(source)
+ source = self._remote_expand_user(source)
+
+ remote_stat = {}
+ remote_checksum = None
+ if not self._connection.become:
+ # Get checksum for the remote file. Don't bother if using become as slurp will be used.
+ # Follow symlinks because fetch always follows symlinks
+ try:
+ remote_stat = self._execute_remote_stat(source, all_vars=task_vars, follow=True)
+ except AnsibleError as ae:
+ result['changed'] = False
+ result['file'] = source
+ if fail_on_missing:
+ result['failed'] = True
+ result['msg'] = to_text(ae)
+ else:
+ result['msg'] = "%s, ignored" % to_text(ae, errors='surrogate_or_replace')
+
+ return result
+
+ remote_checksum = remote_stat.get('checksum')
+ if remote_stat.get('exists'):
+ if remote_stat.get('isdir'):
+ result['failed'] = True
+ result['changed'] = False
+ result['msg'] = "remote file is a directory, fetch cannot work on directories"
+
+ # Historically, these don't fail because you may want to transfer
+ # a log file that may not exist while continuing to fetch other
+ # log files. Today, this is better achieved by adding
+ # ignore_errors or failed_when to the task. Control the behaviour
+ # via fail_on_missing
+ if not fail_on_missing:
+ result['msg'] += ", not transferring, ignored"
+ del result['changed']
+ del result['failed']
+
+ return result
+
+ # use slurp if permissions are lacking or privilege escalation is needed
+ remote_data = None
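+ # no usable checksum back from stat (e.g. unreadable file or become in use) means we fall back to slurp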
+ if remote_checksum in (None, '1', ''):
+ slurpres = self._execute_module(module_name='ansible.legacy.slurp', module_args=dict(src=source), task_vars=task_vars)
+ if slurpres.get('failed'):
+ if not fail_on_missing:
+ result['file'] = source
+ result['changed'] = False
+ else:
+ result.update(slurpres)
+
+ if 'not found' in slurpres.get('msg', ''):
+ result['msg'] = "the remote file does not exist, not transferring, ignored"
+ elif slurpres.get('msg', '').startswith('source is a directory'):
+ result['msg'] = "remote file is a directory, fetch cannot work on directories"
+
+ return result
+ else:
+ if slurpres['encoding'] == 'base64':
+ remote_data = base64.b64decode(slurpres['content'])
+ if remote_data is not None:
+ remote_checksum = checksum_s(remote_data)
+
+ # calculate the destination name
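+ # if the remote shell joins paths with a different separator than the controller (likely a Windows target), normalize backslashes first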
+ if os.path.sep not in self._connection._shell.join_path('a', ''):
+ source = self._connection._shell._unquote(source)
+ source_local = source.replace('\\', '/')
+ else:
+ source_local = source
+
+ # ensure we only use file name, avoid relative paths
+ if not is_subpath(dest, original_dest):
+ # TODO: ? dest = os.path.expanduser(dest.replace(('../','')))
+ raise AnsibleActionFail("Detected directory traversal, expected to be contained in '%s' but got '%s'" % (original_dest, dest))
+
+ if flat:
+ if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
+ raise AnsibleActionFail("dest is an existing directory, use a trailing slash if you want to fetch src into that directory")
+ if dest.endswith(os.sep):
+ # if the path ends with "/", we'll use the source filename as the
+ # destination filename
+ base = os.path.basename(source_local)
+ dest = os.path.join(dest, base)
+ if not dest.startswith("/"):
+ # if dest does not start with "/", we'll assume a relative path
+ dest = self._loader.path_dwim(dest)
+ else:
+ # files are saved in dest dir, with a subdir for each host, then the filename
+ if 'inventory_hostname' in task_vars:
+ target_name = task_vars['inventory_hostname']
+ else:
+ target_name = self._play_context.remote_addr
+ dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)
+
+ dest = os.path.normpath(dest)
+
+ # calculate checksum for the local file
+ local_checksum = checksum(dest)
+
+ if remote_checksum != local_checksum:
+ # create the containing directories, if needed
+ makedirs_safe(os.path.dirname(dest))
+
+ # fetch the file and check for changes
+ if remote_data is None:
+ self._connection.fetch_file(source, dest)
+ else:
+ try:
+ with open(to_bytes(dest, errors='surrogate_or_strict'), 'wb') as f:
+ f.write(remote_data)
+ except (IOError, OSError) as e:
+ raise AnsibleActionFail("Failed to fetch the file: %s" % e)
+ new_checksum = secure_hash(dest)
+ # For backwards compatibility. We'll return None on FIPS enabled systems
+ try:
+ new_md5 = md5(dest)
+ except ValueError:
+ new_md5 = None
+
+ if validate_checksum and new_checksum != remote_checksum:
+ result.update(dict(failed=True, md5sum=new_md5,
+ msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
+ checksum=new_checksum, remote_checksum=remote_checksum))
+ else:
+ result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
+ 'remote_md5sum': None, 'checksum': new_checksum,
+ 'remote_checksum': remote_checksum})
+ else:
+ # For backwards compatibility. We'll return None on FIPS enabled systems
+ try:
+ local_md5 = md5(dest)
+ except ValueError:
+ local_md5 = None
+ result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))
+
+ finally:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
diff --git a/lib/ansible/plugins/action/gather_facts.py b/lib/ansible/plugins/action/gather_facts.py
new file mode 100644
index 0000000..3ff7beb
--- /dev/null
+++ b/lib/ansible/plugins/action/gather_facts.py
@@ -0,0 +1,152 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+
+from ansible import constants as C
+from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
+
+class ActionModule(ActionBase):
+
+ def _get_module_args(self, fact_module, task_vars):
+
+ mod_args = self._task.args.copy()
+
+ # deal with setup-specific arguments
+ if fact_module not in C._ACTION_SETUP:
+ # TODO: remove in favor of controller side argspec detecting valid arguments
+ # network facts modules must support gather_subset
+ try:
+ name = self._connection.ansible_name.removeprefix('ansible.netcommon.')
+ except AttributeError:
+ name = self._connection._load_name.split('.')[-1]
+ if name not in ('network_cli', 'httpapi', 'netconf'):
+ subset = mod_args.pop('gather_subset', None)
+ if subset not in ('all', ['all']):
+ self._display.warning('Ignoring subset(%s) for %s' % (subset, fact_module))
+
+ timeout = mod_args.pop('gather_timeout', None)
+ if timeout is not None:
+ self._display.warning('Ignoring timeout(%s) for %s' % (timeout, fact_module))
+
+ fact_filter = mod_args.pop('filter', None)
+ if fact_filter is not None:
+ self._display.warning('Ignoring filter(%s) for %s' % (fact_filter, fact_module))
+
+ # Strip out keys with ``None`` values, effectively mimicking ``omit`` behavior
+ # This ensures we don't pass a ``None`` value as an argument expecting a specific type
+ mod_args = dict((k, v) for k, v in mod_args.items() if v is not None)
+
+ # handle module defaults
+ resolved_fact_module = self._shared_loader_obj.module_loader.find_plugin_with_context(
+ fact_module, collection_list=self._task.collections
+ ).resolved_fqcn
+
+ mod_args = get_action_args_with_defaults(
+ resolved_fact_module, mod_args, self._task.module_defaults, self._templar,
+ action_groups=self._task._parent._play._action_groups
+ )
+
+ return mod_args
+
+ def _combine_task_result(self, result, task_result):
+ filtered_res = {
+ 'ansible_facts': task_result.get('ansible_facts', {}),
+ 'warnings': task_result.get('warnings', []),
+ 'deprecations': task_result.get('deprecations', []),
+ }
+
+ # on conflict the last plugin processed wins, but try to do deep merge and append to lists.
+ return merge_hash(result, filtered_res, list_merge='append_rp')
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ result['ansible_facts'] = {}
+
+ # copy the value with list() so we don't mutate the config
+ modules = list(C.config.get_config_value('FACTS_MODULES', variables=task_vars))
+
+ parallel = task_vars.pop('ansible_facts_parallel', self._task.args.pop('parallel', None))
+ if 'smart' in modules:
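+ # 'smart' is a placeholder: resolve it to a connection/OS specific facts module, defaulting to ansible.legacy.setup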
+ connection_map = C.config.get_config_value('CONNECTION_FACTS_MODULES', variables=task_vars)
+ network_os = self._task.args.get('network_os', task_vars.get('ansible_network_os', task_vars.get('ansible_facts', {}).get('network_os')))
+ modules.extend([connection_map.get(network_os or self._connection.ansible_name, 'ansible.legacy.setup')])
+ modules.pop(modules.index('smart'))
+
+ failed = {}
+ skipped = {}
+
+ if parallel is None:
+ # default: only gather in parallel when more than one facts module will run
+ parallel = len(modules) > 1
+ else:
+ parallel = boolean(parallel)
+
+ if not parallel:
+ # serially execute each module
+ for fact_module in modules:
+ # just one module, no need for fancy async
+ mod_args = self._get_module_args(fact_module, task_vars)
+ res = self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars, wrap_async=False)
+ if res.get('failed', False):
+ failed[fact_module] = res
+ elif res.get('skipped', False):
+ skipped[fact_module] = res
+ else:
+ result = self._combine_task_result(result, res)
+
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+ else:
+ # do it async
+ jobs = {}
+ for fact_module in modules:
+ mod_args = self._get_module_args(fact_module, task_vars)
+ self._display.vvvv("Running %s" % fact_module)
+ jobs[fact_module] = (self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars, wrap_async=True))
+
+ while jobs:
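+ # poll the outstanding async jobs via async_status until every facts module has finished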
+ for module in jobs:
+ poll_args = {'jid': jobs[module]['ansible_job_id'], '_async_dir': os.path.dirname(jobs[module]['results_file'])}
+ res = self._execute_module(module_name='ansible.legacy.async_status', module_args=poll_args, task_vars=task_vars, wrap_async=False)
+ if res.get('finished', 0) == 1:
+ if res.get('failed', False):
+ failed[module] = res
+ elif res.get('skipped', False):
+ skipped[module] = res
+ else:
+ result = self._combine_task_result(result, res)
+ del jobs[module]
+ break
+ else:
+ time.sleep(0.1)
+ else:
+ time.sleep(0.5)
+
+ if skipped:
+ result['msg'] = "The following modules were skipped: %s\n" % (', '.join(skipped.keys()))
+ result['skipped_modules'] = skipped
+ if len(skipped) == len(modules):
+ result['skipped'] = True
+
+ if failed:
+ result['failed'] = True
+ result['msg'] = "The following modules failed to execute: %s\n" % (', '.join(failed.keys()))
+ result['failed_modules'] = failed
+
+ # tell executor facts were gathered
+ result['ansible_facts']['_ansible_facts_gathered'] = True
+
+ # hack to keep --verbose from showing all the setup module result
+ result['_ansible_verbose_override'] = True
+
+ return result
diff --git a/lib/ansible/plugins/action/group_by.py b/lib/ansible/plugins/action/group_by.py
new file mode 100644
index 0000000..0958ad8
--- /dev/null
+++ b/lib/ansible/plugins/action/group_by.py
@@ -0,0 +1,51 @@
+# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six import string_types
+
+
+class ActionModule(ActionBase):
+ ''' Create inventory groups based on variables '''
+
+ # We need to be able to modify the inventory
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('key', 'parents'))
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if 'key' not in self._task.args:
+ result['failed'] = True
+ result['msg'] = "the 'key' param is required when using group_by"
+ return result
+
+ group_name = self._task.args.get('key')
+ parent_groups = self._task.args.get('parents', ['all'])
+ if isinstance(parent_groups, string_types):
+ parent_groups = [parent_groups]
+
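+ # the strategy plugin reads 'add_group'/'parent_groups' from this result to create the groups; group names cannot contain spaces, so replace them with dashes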
+ result['changed'] = False
+ result['add_group'] = group_name.replace(' ', '-')
+ result['parent_groups'] = [name.replace(' ', '-') for name in parent_groups]
+ return result
diff --git a/lib/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py
new file mode 100644
index 0000000..3c3cb9e
--- /dev/null
+++ b/lib/ansible/plugins/action/include_vars.py
@@ -0,0 +1,290 @@
+# Copyright: (c) 2016, Allen Sanabria <asanabria@linuxdynasty.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from os import path, walk
+import re
+
+import ansible.constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import combine_vars
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+
+ VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
+ VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions', 'ignore_unknown_extensions']
+ VALID_FILE_ARGUMENTS = ['file', '_raw_params']
+ VALID_ALL = ['name', 'hash_behaviour']
+
+ def _set_dir_defaults(self):
+ if not self.depth:
+ self.depth = 0
+
+ if self.files_matching:
+ self.matcher = re.compile(r'{0}'.format(self.files_matching))
+ else:
+ self.matcher = None
+
+ if not self.ignore_files:
+ self.ignore_files = list()
+
+ if isinstance(self.ignore_files, string_types):
+ self.ignore_files = self.ignore_files.split()
+
+ elif isinstance(self.ignore_files, dict):
+ # the call site discards our return value, so raise instead of returning a failure dict
+ raise AnsibleError('{0} must be a list'.format(self.ignore_files))
+
+ def _set_args(self):
+ """ Set instance variables based on the arguments that were passed """
+
+ self.hash_behaviour = self._task.args.get('hash_behaviour', None)
+ self.return_results_as_name = self._task.args.get('name', None)
+ self.source_dir = self._task.args.get('dir', None)
+ self.source_file = self._task.args.get('file', None)
+ if not self.source_dir and not self.source_file:
+ self.source_file = self._task.args.get('_raw_params')
+ if self.source_file:
+ self.source_file = self.source_file.rstrip('\n')
+
+ self.depth = self._task.args.get('depth', None)
+ self.files_matching = self._task.args.get('files_matching', None)
+ self.ignore_unknown_extensions = self._task.args.get('ignore_unknown_extensions', False)
+ self.ignore_files = self._task.args.get('ignore_files', None)
+ self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)
+
+ # convert/validate extensions list
+ if isinstance(self.valid_extensions, string_types):
+ # split a comma separated string rather than exploding it into single characters
+ self.valid_extensions = [ext.strip() for ext in self.valid_extensions.split(',')]
+ if not isinstance(self.valid_extensions, list):
+ raise AnsibleError('Invalid type for "extensions" option, it must be a list')
+
+ def run(self, tmp=None, task_vars=None):
+ """ Load yml files recursively from a directory.
+ """
+ del tmp # tmp no longer has any effect
+
+ if task_vars is None:
+ task_vars = dict()
+
+ self.show_content = True
+ self.included_files = []
+
+ # Validate arguments
+ dirs = 0
+ files = 0
+ for arg in self._task.args:
+ if arg in self.VALID_DIR_ARGUMENTS:
+ dirs += 1
+ elif arg in self.VALID_FILE_ARGUMENTS:
+ files += 1
+ elif arg in self.VALID_ALL:
+ pass
+ else:
+ raise AnsibleError('{0} is not a valid option in include_vars'.format(to_native(arg)))
+
+ if dirs and files:
+ raise AnsibleError("You are mixing file only and dir only arguments, these are incompatible")
+
+ # set internal vars from args
+ self._set_args()
+
+ results = dict()
+ failed = False
+ if self.source_dir:
+ self._set_dir_defaults()
+ self._set_root_dir()
+ if not path.exists(self.source_dir):
+ failed = True
+ err_msg = ('{0} directory does not exist'.format(to_native(self.source_dir)))
+ elif not path.isdir(self.source_dir):
+ failed = True
+ err_msg = ('{0} is not a directory'.format(to_native(self.source_dir)))
+ else:
+ for root_dir, filenames in self._traverse_dir_depth():
+ failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
+ if failed:
+ break
+ results.update(updated_results)
+ else:
+ try:
+ self.source_file = self._find_needle('vars', self.source_file)
+ failed, err_msg, updated_results = (
+ self._load_files(self.source_file)
+ )
+ if not failed:
+ results.update(updated_results)
+
+ except AnsibleError as e:
+ failed = True
+ err_msg = to_native(e)
+
+ if self.return_results_as_name:
+ scope = dict()
+ scope[self.return_results_as_name] = results
+ results = scope
+
+ result = super(ActionModule, self).run(task_vars=task_vars)
+
+ if failed:
+ result['failed'] = failed
+ result['message'] = err_msg
+ elif self.hash_behaviour is not None and self.hash_behaviour != C.DEFAULT_HASH_BEHAVIOUR:
+ merge_hashes = self.hash_behaviour == 'merge'
+ for key, value in results.items():
+ old_value = task_vars.get(key, None)
+ results[key] = combine_vars(old_value, value, merge=merge_hashes)
+
+ result['ansible_included_var_files'] = self.included_files
+ result['ansible_facts'] = results
+ result['_ansible_no_log'] = not self.show_content
+
+ return result
+
+ def _set_root_dir(self):
+ if self._task._role:
+ if self.source_dir.split('/')[0] == 'vars':
+ path_to_use = (
+ path.join(self._task._role._role_path, self.source_dir)
+ )
+ if path.exists(path_to_use):
+ self.source_dir = path_to_use
+ else:
+ path_to_use = (
+ path.join(
+ self._task._role._role_path, 'vars', self.source_dir
+ )
+ )
+ self.source_dir = path_to_use
+ else:
+ if hasattr(self._task._ds, '_data_source'):
+ current_dir = (
+ "/".join(self._task._ds._data_source.split('/')[:-1])
+ )
+ self.source_dir = path.join(current_dir, self.source_dir)
+
+ def _log_walk(self, error):
+ self._display.vvv('Issue with walking through "%s": %s' % (to_native(error.filename), to_native(error)))
+
+ def _traverse_dir_depth(self):
+ """ Recursively iterate over a directory and sort the files in
+ alphabetical order. Do not iterate past the set depth.
+ The default depth is unlimited.
+ """
+ current_depth = 0
+ sorted_walk = list(walk(self.source_dir, onerror=self._log_walk))
+ sorted_walk.sort(key=lambda x: x[0])
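+ # sorting on the directory path keeps the variable load order deterministic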
+ for current_root, current_dir, current_files in sorted_walk:
+ current_depth += 1
+ if current_depth <= self.depth or self.depth == 0:
+ current_files.sort()
+ yield (current_root, current_files)
+ else:
+ break
+
+ def _ignore_file(self, filename):
+ """ Return True if a file matches the list of ignore_files.
+ Args:
+ filename (str): The filename that is being matched against.
+
+ Returns:
+ Boolean
+ """
+ for file_type in self.ignore_files:
+ try:
+ if re.search(r'{0}$'.format(file_type), filename):
+ return True
+ except Exception:
+ err_msg = 'Invalid regular expression: {0}'.format(file_type)
+ raise AnsibleError(err_msg)
+ return False
+
+ def _is_valid_file_ext(self, source_file):
+ """ Verify if source file has a valid extension
+ Args:
+ source_file (str): The full path to the source file, or just the file name.
+ Returns:
+ Bool
+ """
+ file_ext = path.splitext(source_file)
+ return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)
+
+ def _load_files(self, filename, validate_extensions=False):
+ """ Loads a file and converts the output into a valid Python dict.
+ Args:
+ filename (str): The source file.
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ results = dict()
+ failed = False
+ err_msg = ''
+ if validate_extensions and not self._is_valid_file_ext(filename):
+ failed = True
+ err_msg = ('{0} does not have a valid extension: {1}'.format(to_native(filename), ', '.join(self.valid_extensions)))
+ else:
+ b_data, show_content = self._loader._get_file_contents(filename)
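+ # _get_file_contents transparently decrypts vaulted files; show_content comes back False for those so no_log can be honored below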
+ data = to_text(b_data, errors='surrogate_or_strict')
+
+ self.show_content = show_content
+ data = self._loader.load(data, file_name=filename, show_content=show_content)
+ if not data:
+ data = dict()
+ if not isinstance(data, dict):
+ failed = True
+ err_msg = ('{0} must be stored as a dictionary/hash'.format(to_native(filename)))
+ else:
+ self.included_files.append(filename)
+ results.update(data)
+
+ return failed, err_msg, results
+
+ def _load_files_in_dir(self, root_dir, var_files):
+ """ Load the found yml files and update/overwrite the dictionary.
+ Args:
+ root_dir (str): The base directory of the list of files that is being passed.
+ var_files: (list): List of files to iterate over and load into a dictionary.
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ results = dict()
+ failed = False
+ err_msg = ''
+ for filename in var_files:
+ stop_iter = False
+ # Never include main.yml from a role, as that is the default included by the role
+ if self._task._role:
+ if path.join(self._task._role._role_path, filename) == path.join(root_dir, 'vars', 'main.yml'):
+ stop_iter = True
+ continue
+
+ filepath = path.join(root_dir, filename)
+ if self.files_matching:
+ if not self.matcher.search(filename):
+ stop_iter = True
+
+ if not stop_iter and not failed:
+ if self.ignore_unknown_extensions:
+ if path.exists(filepath) and not self._ignore_file(filename) and self._is_valid_file_ext(filename):
+ failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
+ if not failed:
+ results.update(loaded_data)
+ else:
+ if path.exists(filepath) and not self._ignore_file(filename):
+ failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
+ if not failed:
+ results.update(loaded_data)
+
+ return failed, err_msg, results
diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py
new file mode 100644
index 0000000..cb91521
--- /dev/null
+++ b/lib/ansible/plugins/action/normal.py
@@ -0,0 +1,59 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+
+ # individual modules might disagree, but as the generic action plugin we allow both at this point.
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if not result.get('skipped'):
+
+ if result.get('invocation', {}).get('module_args'):
+ # avoid passing to modules in case of no_log
+ # should not be set anymore but here for backwards compatibility
+ del result['invocation']['module_args']
+
+ # FUTURE: better to let _execute_module calculate this internally?
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+
+ # do work!
+ result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=wrap_async))
+
+ # hack to keep --verbose from showing all the setup module result
+ # moved from setup module as now we filter out all _ansible_ from result
+ # FIXME: is this still accurate with gather_facts etc, or does it need support for FQ and other names?
+ if self._task.action in C._ACTION_SETUP:
+ result['_ansible_verbose_override'] = True
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py
new file mode 100644
index 0000000..6c43659
--- /dev/null
+++ b/lib/ansible/plugins/action/package.py
@@ -0,0 +1,96 @@
+# (c) 2015, Ansible Inc,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleAction, AnsibleActionFail
+from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+
+ BUILTIN_PKG_MGR_MODULES = {manager['name'] for manager in PKG_MGRS}
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for package operations '''
+
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ module = self._task.args.get('use', 'auto')
+
+ if module == 'auto':
+ try:
+ if self._task.delegate_to: # if we delegate, we should use delegated host's facts
+ module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
+ else:
+ module = self._templar.template('{{ansible_facts.pkg_mgr}}')
+ except Exception:
+ pass # could not get it from template!
+
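+ # if templating could not supply the package manager, fall back to a minimal fact gather below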
+ try:
+ if module == 'auto':
+ facts = self._execute_module(
+ module_name='ansible.legacy.setup',
+ module_args=dict(filter='ansible_pkg_mgr', gather_subset='!all'),
+ task_vars=task_vars)
+ display.debug("Facts %s" % facts)
+ module = facts.get('ansible_facts', {}).get('ansible_pkg_mgr', 'auto')
+
+ if module != 'auto':
+ if not self._shared_loader_obj.module_loader.has_plugin(module):
+ raise AnsibleActionFail('Could not find a module for %s.' % module)
+ else:
+ # run the 'package' module
+ new_module_args = self._task.args.copy()
+ if 'use' in new_module_args:
+ del new_module_args['use']
+
+ # get defaults for specific module
+ context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
+ new_module_args = get_action_args_with_defaults(
+ context.resolved_fqcn, new_module_args, self._task.module_defaults, self._templar,
+ action_groups=self._task._parent._play._action_groups
+ )
+
+ if module in self.BUILTIN_PKG_MGR_MODULES:
+ # prefix with ansible.legacy to eliminate external collisions while still allowing library/ override
+ module = 'ansible.legacy.' + module
+
+ display.vvvv("Running %s" % module)
+ result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
+ else:
+ raise AnsibleActionFail('Could not detect which package manager to use. Try gathering facts or setting the "use" option.')
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ if not self._task.async_val:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
diff --git a/lib/ansible/plugins/action/pause.py b/lib/ansible/plugins/action/pause.py
new file mode 100644
index 0000000..4c98cbb
--- /dev/null
+++ b/lib/ansible/plugins/action/pause.py
@@ -0,0 +1,311 @@
+# Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import signal
+import sys
+import termios
+import time
+import tty
+
+from os import (
+ getpgrp,
+ isatty,
+ tcgetpgrp,
+)
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+try:
+ import curses
+ import io
+
+ # Nest the try except since curses.error is not available if curses did not import
+ try:
+ curses.setupterm()
+ HAS_CURSES = True
+ except (curses.error, TypeError, io.UnsupportedOperation):
+ HAS_CURSES = False
+except ImportError:
+ HAS_CURSES = False
+
+MOVE_TO_BOL = b'\r'
+CLEAR_TO_EOL = b'\x1b[K'
+if HAS_CURSES:
+ # curses.tigetstr() returns None in some circumstances
+ MOVE_TO_BOL = curses.tigetstr('cr') or MOVE_TO_BOL
+ CLEAR_TO_EOL = curses.tigetstr('el') or CLEAR_TO_EOL
+
+
+def setraw(fd, when=termios.TCSAFLUSH):
+ """Put terminal into a raw mode.
+
+ Copied from ``tty`` from CPython 3.11.0, and modified to not remove OPOST from OFLAG
+
+ OPOST is kept to prevent an issue with multi line prompts from being corrupted now that display
+ is proxied via the queue from forks. The problem is a race condition, in that we proxy the display
+ over the fork, but before it can be displayed, this plugin will have continued executing, potentially
+ setting stdout and stdin to raw, which removes the output post-processing that commonly converts NL to CRLF
+ """
+ mode = termios.tcgetattr(fd)
+ mode[tty.IFLAG] = mode[tty.IFLAG] & ~(termios.BRKINT | termios.ICRNL | termios.INPCK | termios.ISTRIP | termios.IXON)
+ # mode[tty.OFLAG] = mode[tty.OFLAG] & ~(termios.OPOST)
+ mode[tty.CFLAG] = mode[tty.CFLAG] & ~(termios.CSIZE | termios.PARENB)
+ mode[tty.CFLAG] = mode[tty.CFLAG] | termios.CS8
+ mode[tty.LFLAG] = mode[tty.LFLAG] & ~(termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG)
+ mode[tty.CC][termios.VMIN] = 1
+ mode[tty.CC][termios.VTIME] = 0
+ termios.tcsetattr(fd, when, mode)
+
+
+class AnsibleTimeoutExceeded(Exception):
+ pass
+
+
+def timeout_handler(signum, frame):
+ raise AnsibleTimeoutExceeded
+
+
+def clear_line(stdout):
+ stdout.write(b'\x1b[%s' % MOVE_TO_BOL)
+ stdout.write(b'\x1b[%s' % CLEAR_TO_EOL)
+
+
+def is_interactive(fd=None):
+ if fd is None:
+ return False
+
+ if isatty(fd):
+ # Compare the current process group to the process group associated
+ # with terminal of the given file descriptor to determine if the process
+ # is running in the background.
+ return getpgrp() == tcgetpgrp(fd)
+ else:
+ return False
+
+
+class ActionModule(ActionBase):
+ ''' pauses execution for a length of time, or until input is received '''
+
+ BYPASS_HOST_LOOP = True
+
+ def run(self, tmp=None, task_vars=None):
+ ''' run the pause action module '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ validation_result, new_module_args = self.validate_argument_spec(
+ argument_spec={
+ 'echo': {'type': 'bool', 'default': True},
+ 'minutes': {'type': int}, # the int callable (rather than 'int') keeps accepting floats for backwards compat
+ 'seconds': {'type': int}, # the int callable (rather than 'int') keeps accepting floats for backwards compat
+ 'prompt': {'type': 'str'},
+ },
+ mutually_exclusive=(
+ ('minutes', 'seconds'),
+ ),
+ )
+
+ duration_unit = 'minutes'
+ prompt = None
+ seconds = None
+ echo = new_module_args['echo']
+ echo_prompt = ''
+ result.update(dict(
+ changed=False,
+ rc=0,
+ stderr='',
+ stdout='',
+ start=None,
+ stop=None,
+ delta=None,
+ echo=echo
+ ))
+
+ # Add a note saying the output is hidden if echo is disabled
+ if not echo:
+ echo_prompt = ' (output is hidden)'
+
+ if new_module_args['prompt']:
+ prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), new_module_args['prompt'], echo_prompt)
+ else:
+ # If no custom prompt is specified, set a default prompt
+ prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)
+
+ if new_module_args['minutes'] is not None:
+ seconds = new_module_args['minutes'] * 60
+ elif new_module_args['seconds'] is not None:
+ seconds = new_module_args['seconds']
+ duration_unit = 'seconds'
+
+ ########################################################################
+ # Begin the hard work!
+
+ start = time.time()
+ result['start'] = to_text(datetime.datetime.now())
+ result['user_input'] = b''
+
+ stdin_fd = None
+ old_settings = None
+ try:
+ if seconds is not None:
+ if seconds < 1:
+ seconds = 1
+
+ # setup the alarm handler
+ signal.signal(signal.SIGALRM, timeout_handler)
+ signal.alarm(seconds)
+
+ # show the timer and control prompts
+ display.display("Pausing for %d seconds%s" % (seconds, echo_prompt))
+ display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"),
+
+ # show the prompt specified in the task
+ if new_module_args['prompt']:
+ display.display(prompt)
+
+ else:
+ display.display(prompt)
+
+ # save the attributes on the existing (duped) stdin so
+ # that we can restore them later after we set raw mode
+ stdin_fd = None
+ stdout_fd = None
+ try:
+ stdin = self._connection._new_stdin.buffer
+ stdout = sys.stdout.buffer
+ stdin_fd = stdin.fileno()
+ stdout_fd = stdout.fileno()
+ except (ValueError, AttributeError):
+ # ValueError: someone is using a closed file descriptor as stdin
+ # AttributeError: someone is using a null file descriptor as stdin on windoze
+ stdin = None
+ interactive = is_interactive(stdin_fd)
+ if interactive:
+ # grab actual Ctrl+C sequence
+ try:
+ intr = termios.tcgetattr(stdin_fd)[6][termios.VINTR]
+ except Exception:
+ # unsupported/not present, use default
+ intr = b'\x03' # value for Ctrl+C
+
+ # get backspace sequences
+ try:
+ backspace = termios.tcgetattr(stdin_fd)[6][termios.VERASE]
+ except Exception:
+ backspace = [b'\x7f', b'\x08']
+
+ old_settings = termios.tcgetattr(stdin_fd)
+ setraw(stdin_fd)
+
+ # Only set stdout to raw mode if it is a TTY. This is needed when redirecting
+ # stdout to a file since a file cannot be set to raw mode.
+ if isatty(stdout_fd):
+ setraw(stdout_fd)
+
+ # Only echo input if no timeout is specified
+ if not seconds and echo:
+ new_settings = termios.tcgetattr(stdin_fd)
+ new_settings[3] = new_settings[3] | termios.ECHO
+ termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)
+
+ # flush the buffer to make sure no previous key presses
+ # are read in below
+ termios.tcflush(stdin, termios.TCIFLUSH)
+
+ while True:
+ if not interactive:
+ if seconds is None:
+ display.warning("Not waiting for response to prompt as stdin is not interactive")
+ if seconds is not None:
+ # Give the signal handler enough time to timeout
+ time.sleep(seconds + 1)
+ break
+
+ try:
+ key_pressed = stdin.read(1)
+
+ if key_pressed == intr: # value for Ctrl+C
+ clear_line(stdout)
+ raise KeyboardInterrupt
+
+ if not seconds:
+ # read key presses and act accordingly
+ if key_pressed in (b'\r', b'\n'):
+ clear_line(stdout)
+ break
+ elif key_pressed in backspace:
+ # delete a character if backspace is pressed
+ result['user_input'] = result['user_input'][:-1]
+ clear_line(stdout)
+ if echo:
+ stdout.write(result['user_input'])
+ stdout.flush()
+ else:
+ result['user_input'] += key_pressed
+
+ except KeyboardInterrupt:
+ signal.alarm(0)
+ display.display("Press 'C' to continue the play or 'A' to abort \r"),
+ if self._c_or_a(stdin):
+ clear_line(stdout)
+ break
+
+ clear_line(stdout)
+
+ raise AnsibleError('user requested abort!')
+
+ except AnsibleTimeoutExceeded:
+ # this is the exception we expect when the alarm signal
+ # fires, so we simply ignore it to move into the cleanup
+ pass
+ finally:
+ # cleanup and save some information
+ # restore the old settings for the duped stdin (stdin_fd)
+ if not (None in (stdin_fd, old_settings)) and isatty(stdin_fd):
+ termios.tcsetattr(stdin_fd, termios.TCSADRAIN, old_settings)
+
+ duration = time.time() - start
+ result['stop'] = to_text(datetime.datetime.now())
+ result['delta'] = int(duration)
+
+ if duration_unit == 'minutes':
+ duration = round(duration / 60.0, 2)
+ else:
+ duration = round(duration, 2)
+ result['stdout'] = "Paused for %s %s" % (duration, duration_unit)
+
+ result['user_input'] = to_text(result['user_input'], errors='surrogate_or_strict')
+ return result
+
+ def _c_or_a(self, stdin):
+ while True:
+ key_pressed = stdin.read(1)
+ if key_pressed.lower() == b'a':
+ return False
+ elif key_pressed.lower() == b'c':
+ return True
diff --git a/lib/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py
new file mode 100644
index 0000000..b82ed34
--- /dev/null
+++ b/lib/ansible/plugins/action/raw.py
@@ -0,0 +1,50 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ if self._task.environment and any(self._task.environment):
+ self._display.warning('raw module does not support the environment keyword')
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if self._play_context.check_mode:
+ # in --check mode, always skip this module execution
+ result['skipped'] = True
+ return result
+
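+ # raw bypasses the module subsystem: the command string is executed directly over the connection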
+ executable = self._task.args.get('executable', False)
+ result.update(self._low_level_execute_command(self._task.args.get('_raw_params'), executable=executable))
+
+ result['changed'] = True
+
+ if 'rc' in result and result['rc'] != 0:
+ result['failed'] = True
+ result['msg'] = 'non-zero return code'
+
+ return result
diff --git a/lib/ansible/plugins/action/reboot.py b/lib/ansible/plugins/action/reboot.py
new file mode 100644
index 0000000..40447d1
--- /dev/null
+++ b/lib/ansible/plugins/action/reboot.py
@@ -0,0 +1,465 @@
+# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
+# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import random
+import time
+
+from datetime import datetime, timedelta
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.validation import check_type_list, check_type_str
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class TimedOutException(Exception):
+ pass
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset((
+ 'boot_time_command',
+ 'connect_timeout',
+ 'msg',
+ 'post_reboot_delay',
+ 'pre_reboot_delay',
+ 'reboot_command',
+ 'reboot_timeout',
+ 'search_paths',
+ 'test_command',
+ ))
+
+ DEFAULT_REBOOT_TIMEOUT = 600
+ DEFAULT_CONNECT_TIMEOUT = None
+ DEFAULT_PRE_REBOOT_DELAY = 0
+ DEFAULT_POST_REBOOT_DELAY = 0
+ DEFAULT_TEST_COMMAND = 'whoami'
+ DEFAULT_BOOT_TIME_COMMAND = 'cat /proc/sys/kernel/random/boot_id'
+ DEFAULT_REBOOT_MESSAGE = 'Reboot initiated by Ansible'
+ DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
+ DEFAULT_SHUTDOWN_COMMAND_ARGS = '-r {delay_min} "{message}"'
+ DEFAULT_SUDOABLE = True
+
+ DEPRECATED_ARGS = {} # type: dict[str, str]
+
+ BOOT_TIME_COMMANDS = {
+ 'freebsd': '/sbin/sysctl kern.boottime',
+ 'openbsd': '/sbin/sysctl kern.boottime',
+ 'macosx': 'who -b',
+ 'solaris': 'who -b',
+ 'sunos': 'who -b',
+ 'vmkernel': 'grep booted /var/log/vmksummary.log | tail -n 1',
+ 'aix': 'who -b',
+ }
+
+ SHUTDOWN_COMMANDS = {
+ 'alpine': 'reboot',
+ 'vmkernel': 'reboot',
+ }
+
+ SHUTDOWN_COMMAND_ARGS = {
+ 'alpine': '',
+ 'void': '-r +{delay_min} "{message}"',
+ 'freebsd': '-r +{delay_sec}s "{message}"',
+ 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
+ 'macosx': '-r +{delay_min} "{message}"',
+ 'openbsd': '-r +{delay_min} "{message}"',
+ 'solaris': '-y -g {delay_sec} -i 6 "{message}"',
+ 'sunos': '-y -g {delay_sec} -i 6 "{message}"',
+ 'vmkernel': '-d {delay_sec}',
+ 'aix': '-Fr',
+ }
+
+ TEST_COMMANDS = {
+ 'solaris': 'who',
+ 'vmkernel': 'who',
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(ActionModule, self).__init__(*args, **kwargs)
+
+ @property
+ def pre_reboot_delay(self):
+ return self._check_delay('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY)
+
+ @property
+ def post_reboot_delay(self):
+ return self._check_delay('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY)
+
+ def _check_delay(self, key, default):
+ """Ensure that the value is positive or zero"""
+ value = int(self._task.args.get(key, self._task.args.get(key + '_sec', default)))
+ if value < 0:
+ value = 0
+ return value
+
+ def _get_value_from_facts(self, variable_name, distribution, default_value):
+ """Get dist+version specific args first, then distribution, then family, lastly use default"""
+ attr = getattr(self, variable_name)
+ value = attr.get(
+ distribution['name'] + distribution['version'],
+ attr.get(
+ distribution['name'],
+ attr.get(
+ distribution['family'],
+ getattr(self, default_value))))
+ return value
+
+ def get_shutdown_command_args(self, distribution):
+ reboot_command = self._task.args.get('reboot_command')
+ if reboot_command is not None:
+ try:
+ reboot_command = check_type_str(reboot_command, allow_conversion=False)
+ except TypeError as e:
+ raise AnsibleError("Invalid value given for 'reboot_command': %s." % to_native(e))
+
+ # Return the args portion of the custom command, or '' when none were provided
+ try:
+ return reboot_command.split(' ', 1)[1]
+ except IndexError:
+ return ''
+ else:
+ args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
+
+ # Convert seconds to minutes. If less than 60 seconds, this becomes 0.
+ delay_min = self.pre_reboot_delay // 60
+ reboot_message = self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE)
+ return args.format(delay_sec=self.pre_reboot_delay, delay_min=delay_min, message=reboot_message)
+
+ def get_distribution(self, task_vars):
+ # FIXME: only execute the module if we don't already have the facts we need
+ distribution = {}
+ display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
+ module_output = self._execute_module(
+ task_vars=task_vars,
+ module_name='ansible.legacy.setup',
+ module_args={'gather_subset': 'min'})
+ try:
+ if module_output.get('failed', False):
+ raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
+ to_native(module_output['module_stdout']).strip(),
+ to_native(module_output['module_stderr']).strip()))
+ distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
+ distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
+ distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
+ display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
+ return distribution
+ except KeyError as ke:
+ raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
+
+ def get_shutdown_command(self, task_vars, distribution):
+ reboot_command = self._task.args.get('reboot_command')
+ if reboot_command is not None:
+ try:
+ reboot_command = check_type_str(reboot_command, allow_conversion=False)
+ except TypeError as e:
+ raise AnsibleError("Invalid value given for 'reboot_command': %s." % to_native(e))
+ shutdown_bin = reboot_command.split(' ', 1)[0]
+ else:
+ shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
+
+ if shutdown_bin[0] == '/':
+ return shutdown_bin
+ else:
+ default_search_paths = ['/sbin', '/bin', '/usr/sbin', '/usr/bin', '/usr/local/sbin']
+ search_paths = self._task.args.get('search_paths', default_search_paths)
+
+ try:
+ # Convert bare strings to a list
+ search_paths = check_type_list(search_paths)
+ except TypeError:
+ err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
+ raise AnsibleError(err_msg.format(search_paths))
+
+ display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
+ action=self._task.action,
+ command=shutdown_bin,
+ paths=search_paths))
+
+ find_result = self._execute_module(
+ task_vars=task_vars,
+ # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
+ module_name='ansible.legacy.find',
+ module_args={
+ 'paths': search_paths,
+ 'patterns': [shutdown_bin],
+ 'file_type': 'any'
+ }
+ )
+
+ full_path = [x['path'] for x in find_result['files']]
+ if not full_path:
+ raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
+ return full_path[0]
+
+ def deprecated_args(self):
+ for arg, version in self.DEPRECATED_ARGS.items():
+ if self._task.args.get(arg) is not None:
+ display.warning("Since Ansible {version}, {arg} is no longer a valid option for {action}".format(
+ version=version,
+ arg=arg,
+ action=self._task.action))
+
+ def get_system_boot_time(self, distribution):
+ boot_time_command = self._get_value_from_facts('BOOT_TIME_COMMANDS', distribution, 'DEFAULT_BOOT_TIME_COMMAND')
+ if self._task.args.get('boot_time_command'):
+ boot_time_command = self._task.args.get('boot_time_command')
+
+ try:
+ check_type_str(boot_time_command, allow_conversion=False)
+ except TypeError as e:
+ raise AnsibleError("Invalid value given for 'boot_time_command': %s." % to_native(e))
+
+ display.debug("{action}: getting boot time with command: '{command}'".format(action=self._task.action, command=boot_time_command))
+ command_result = self._low_level_execute_command(boot_time_command, sudoable=self.DEFAULT_SUDOABLE)
+
+ if command_result['rc'] != 0:
+ stdout = command_result['stdout']
+ stderr = command_result['stderr']
+ raise AnsibleError("{action}: failed to get host boot time info, rc: {rc}, stdout: {out}, stderr: {err}".format(
+ action=self._task.action,
+ rc=command_result['rc'],
+ out=to_native(stdout),
+ err=to_native(stderr)))
+ display.debug("{action}: last boot time: {boot}".format(action=self._task.action, boot=command_result['stdout'].strip()))
+ return command_result['stdout'].strip()
+
+ def check_boot_time(self, distribution, previous_boot_time):
+ display.vvv("{action}: attempting to get system boot time".format(action=self._task.action))
+ connect_timeout = self._task.args.get('connect_timeout', self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))
+
+ # override connection timeout from defaults to custom value
+ if connect_timeout:
+ try:
+ display.debug("{action}: setting connect_timeout to {value}".format(action=self._task.action, value=connect_timeout))
+ self._connection.set_option("connection_timeout", connect_timeout)
+ self._connection.reset()
+ except AttributeError:
+ display.warning("Connection plugin does not allow the connection timeout to be overridden")
+
+ # get the new boot time; any failure propagates up to the retry loop
+ current_boot_time = self.get_system_boot_time(distribution)
+
+ # FreeBSD returns an empty string immediately before reboot, so also check
+ # the length to avoid prematurely assuming the system has rebooted
+ if len(current_boot_time) == 0 or current_boot_time == previous_boot_time:
+ raise ValueError("boot time has not changed")
+
+ def run_test_command(self, distribution, **kwargs):
+ test_command = self._task.args.get('test_command', self._get_value_from_facts('TEST_COMMANDS', distribution, 'DEFAULT_TEST_COMMAND'))
+ display.vvv("{action}: attempting post-reboot test command".format(action=self._task.action))
+ display.debug("{action}: attempting post-reboot test command '{command}'".format(action=self._task.action, command=test_command))
+ try:
+ command_result = self._low_level_execute_command(test_command, sudoable=self.DEFAULT_SUDOABLE)
+ except Exception:
+ # may need to reset the connection in case another reboot occurred
+ # which has invalidated our connection
+ try:
+ self._connection.reset()
+ except AttributeError:
+ pass
+ raise
+
+ if command_result['rc'] != 0:
+ msg = 'Test command failed: {err} {out}'.format(
+ err=to_native(command_result['stderr']),
+ out=to_native(command_result['stdout']))
+ raise RuntimeError(msg)
+
+ display.vvv("{action}: system successfully rebooted".format(action=self._task.action))
+
+ def do_until_success_or_timeout(self, action, reboot_timeout, action_desc, distribution, action_kwargs=None):
+ max_end_time = datetime.utcnow() + timedelta(seconds=reboot_timeout)
+ if action_kwargs is None:
+ action_kwargs = {}
+
+ fail_count = 0
+ max_fail_sleep = 12
+
+ while datetime.utcnow() < max_end_time:
+ try:
+ action(distribution=distribution, **action_kwargs)
+ if action_desc:
+ display.debug('{action}: {desc} success'.format(action=self._task.action, desc=action_desc))
+ return
+ except Exception as e:
+ if isinstance(e, AnsibleConnectionFailure):
+ try:
+ self._connection.reset()
+ except AnsibleConnectionFailure:
+ pass
+ # Use exponential backoff with a max timeout, plus a little bit of randomness
+ random_int = random.randint(0, 1000) / 1000
+ fail_sleep = 2 ** fail_count + random_int
+ if fail_sleep > max_fail_sleep:
+ fail_sleep = max_fail_sleep + random_int
+ if action_desc:
+ try:
+ error = to_text(e).splitlines()[-1]
+ except IndexError:
+ error = to_text(e)
+ display.debug("{action}: {desc} fail '{err}', retrying in {sleep:.4} seconds...".format(
+ action=self._task.action,
+ desc=action_desc,
+ err=error,
+ sleep=fail_sleep))
+ fail_count += 1
+ time.sleep(fail_sleep)
+
+ raise TimedOutException('Timed out waiting for {desc} (timeout={timeout})'.format(desc=action_desc, timeout=reboot_timeout))
+
+ def perform_reboot(self, task_vars, distribution):
+ result = {}
+ reboot_result = {}
+ shutdown_command = self.get_shutdown_command(task_vars, distribution)
+ shutdown_command_args = self.get_shutdown_command_args(distribution)
+ reboot_command = '{0} {1}'.format(shutdown_command, shutdown_command_args)
+
+ try:
+ display.vvv("{action}: rebooting server...".format(action=self._task.action))
+ display.debug("{action}: rebooting server with command '{command}'".format(action=self._task.action, command=reboot_command))
+ reboot_result = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE)
+ except AnsibleConnectionFailure as e:
+ # If the connection is closed too quickly due to the system being shutdown, carry on
+ display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
+ reboot_result['rc'] = 0
+
+ result['start'] = datetime.utcnow()
+
+ if reboot_result['rc'] != 0:
+ result['failed'] = True
+ result['rebooted'] = False
+ result['msg'] = "Reboot command failed. Error was: '{stdout}, {stderr}'".format(
+ stdout=to_native(reboot_result['stdout'].strip()),
+ stderr=to_native(reboot_result['stderr'].strip()))
+ return result
+
+ result['failed'] = False
+ return result
+
+ def validate_reboot(self, distribution, original_connection_timeout=None, action_kwargs=None):
+ display.vvv('{action}: validating reboot'.format(action=self._task.action))
+ result = {}
+
+ try:
+ # keep on checking system boot_time with short connection responses
+ reboot_timeout = int(self._task.args.get('reboot_timeout', self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT)))
+
+ self.do_until_success_or_timeout(
+ action=self.check_boot_time,
+ action_desc="last boot time check",
+ reboot_timeout=reboot_timeout,
+ distribution=distribution,
+ action_kwargs=action_kwargs)
+
+ # Get the connect_timeout set on the connection to compare to the original
+ try:
+ connect_timeout = self._connection.get_option('connection_timeout')
+ except KeyError:
+ pass
+ else:
+ if original_connection_timeout != connect_timeout:
+ try:
+ display.debug("{action}: setting connect_timeout back to original value of {value}".format(
+ action=self._task.action,
+ value=original_connection_timeout))
+ self._connection.set_option("connection_timeout", original_connection_timeout)
+ self._connection.reset()
+ except (AnsibleError, AttributeError) as e:
+ # reset the connection to clear the custom connection timeout
+ display.debug("{action}: failed to reset connection_timeout back to default: {error}".format(action=self._task.action,
+ error=to_text(e)))
+
+ # finally run test command to ensure everything is working
+ # FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates
+ self.do_until_success_or_timeout(
+ action=self.run_test_command,
+ action_desc="post-reboot test command",
+ reboot_timeout=reboot_timeout,
+ distribution=distribution,
+ action_kwargs=action_kwargs)
+
+ result['rebooted'] = True
+ result['changed'] = True
+
+ except TimedOutException as toex:
+ result['failed'] = True
+ result['rebooted'] = True
+ result['msg'] = to_text(toex)
+ return result
+
+ return result
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ # If running with local connection, fail so we don't reboot ourselves
+ if self._connection.transport == 'local':
+ msg = 'Running {0} with local connection would reboot the control node.'.format(self._task.action)
+ return {'changed': False, 'elapsed': 0, 'rebooted': False, 'failed': True, 'msg': msg}
+
+ if self._play_context.check_mode:
+ return {'changed': True, 'elapsed': 0, 'rebooted': True}
+
+ if task_vars is None:
+ task_vars = {}
+
+ self.deprecated_args()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+
+ if result.get('skipped', False) or result.get('failed', False):
+ return result
+
+ distribution = self.get_distribution(task_vars)
+
+ # Get current boot time
+ try:
+ previous_boot_time = self.get_system_boot_time(distribution)
+ except Exception as e:
+ result['failed'] = True
+ result['rebooted'] = False
+ result['msg'] = to_text(e)
+ return result
+
+ # Get the original connection_timeout option var so it can be reset after
+ original_connection_timeout = None
+ try:
+ original_connection_timeout = self._connection.get_option('connection_timeout')
+ display.debug("{action}: saving original connect_timeout of {timeout}".format(action=self._task.action, timeout=original_connection_timeout))
+ except KeyError:
+ display.debug("{action}: connect_timeout connection option has not been set".format(action=self._task.action))
+ # Initiate reboot
+ reboot_result = self.perform_reboot(task_vars, distribution)
+
+ if reboot_result['failed']:
+ result = reboot_result
+ elapsed = datetime.utcnow() - reboot_result['start']
+ result['elapsed'] = elapsed.seconds
+ return result
+
+ if self.post_reboot_delay != 0:
+ display.debug("{action}: waiting an additional {delay} seconds".format(action=self._task.action, delay=self.post_reboot_delay))
+ display.vvv("{action}: waiting an additional {delay} seconds".format(action=self._task.action, delay=self.post_reboot_delay))
+ time.sleep(self.post_reboot_delay)
+
+ # Make sure reboot was successful
+ result = self.validate_reboot(distribution, original_connection_timeout, action_kwargs={'previous_boot_time': previous_boot_time})
+
+ elapsed = datetime.utcnow() - reboot_result['start']
+ result['elapsed'] = elapsed.seconds
+
+ return result
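
The retry helper above backs off exponentially (1, 2, 4, 8... seconds) with up to a second of jitter, capped at max_fail_sleep. A self-contained sketch of just that schedule, mirroring the arithmetic in do_until_success_or_timeout:

    import random

    def backoff_schedule(attempts, max_fail_sleep=12):
        # 2**n seconds plus jitter, capped at max_fail_sleep (plus jitter)
        delays = []
        for fail_count in range(attempts):
            random_int = random.randint(0, 1000) / 1000
            fail_sleep = 2 ** fail_count + random_int
            if fail_sleep > max_fail_sleep:
                fail_sleep = max_fail_sleep + random_int
            delays.append(round(fail_sleep, 3))
        return delays

    print(backoff_schedule(6))  # e.g. [1.42, 2.07, 4.91, 8.23, 12.64, 12.31]
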
diff --git a/lib/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py
new file mode 100644
index 0000000..1bbb800
--- /dev/null
+++ b/lib/ansible/plugins/action/script.py
@@ -0,0 +1,160 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import shlex
+
+from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail, AnsibleActionSkip
+from ansible.executor.powershell import module_manifest as ps_manifest
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ # On Windows platform, absolute paths begin with a (back)slash
+ # after chopping off a potential drive letter.
+ windows_absolute_path_detection = re.compile(r'^(?:[a-zA-Z]\:)?(\\|\/)')
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for script execution operations '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ try:
+ creates = self._task.args.get('creates')
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ if self._remote_file_exists(creates):
+ raise AnsibleActionSkip("%s exists, matching creates option" % creates)
+
+ removes = self._task.args.get('removes')
+ if removes:
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
+ if not self._remote_file_exists(removes):
+ raise AnsibleActionSkip("%s does not exist, matching removes option" % removes)
+
+ # The chdir must be absolute, because a relative path would rely on
+ # remote node behaviour & user config.
+ chdir = self._task.args.get('chdir')
+ if chdir:
+ # Powershell is the only Windows-path aware shell
+ if getattr(self._connection._shell, "_IS_WINDOWS", False) and \
+ not self.windows_absolute_path_detection.match(chdir):
+ raise AnsibleActionFail('chdir %s must be an absolute path for a Windows remote node' % chdir)
+ # Every other shell is unix-path-aware.
+ if not getattr(self._connection._shell, "_IS_WINDOWS", False) and not chdir.startswith('/'):
+ raise AnsibleActionFail('chdir %s must be an absolute path for a Unix-aware remote node' % chdir)
+
+ # Split out the script as the first item in raw_params using
+ # shlex.split() in order to support paths and files with spaces in the name.
+ # Any arguments passed to the script will be added back later.
+ raw_params = to_native(self._task.args.get('_raw_params', ''), errors='surrogate_or_strict')
+ parts = [to_text(s, errors='surrogate_or_strict') for s in shlex.split(raw_params.strip())]
+ source = parts[0]
+
+ # Support executable paths and files with spaces in the name.
+ executable = to_native(self._task.args.get('executable', ''), errors='surrogate_or_strict')
+
+ try:
+ source = self._loader.get_real_file(self._find_needle('files', source), decrypt=self._task.args.get('decrypt', True))
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ if self._task.check_mode:
+ # check mode is supported if 'creates' or 'removes' are provided
+ # the task has already been skipped if a change would not occur
+ if self._task.args.get('creates') or self._task.args.get('removes'):
+ result['changed'] = True
+ raise _AnsibleActionDone(result=result)
+ # If the script doesn't return changed in the result, it defaults to True,
+ # but since the script may override 'changed', just skip instead of guessing.
+ else:
+ result['changed'] = False
+ raise AnsibleActionSkip('Check mode is not supported for this task.', result=result)
+
+ # now we execute script, always assume changed.
+ result['changed'] = True
+
+ # transfer the file to a remote tmp location
+ tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir,
+ os.path.basename(source))
+
+ # Convert raw_params to text for the purpose of replacing the script since
+ # parts and tmp_src are both unicode strings and raw_params will be different
+ # depending on Python version.
+ #
+ # Once everything is encoded consistently, replace the script path on the remote
+ # system with the remainder of the raw_params. This preserves quoting in parameters
+ # that would have been removed by shlex.split().
+ target_command = to_text(raw_params).strip().replace(parts[0], tmp_src)
+
+ self._transfer_file(source, tmp_src)
+
+ # set file permissions, more permissive when the copy is done as a different user
+ self._fixup_perms2((self._connection._shell.tmpdir, tmp_src), execute=True)
+
+ # add preparation steps to one ssh roundtrip executing the script
+ env_dict = dict()
+ env_string = self._compute_environment_string(env_dict)
+
+ if executable:
+ script_cmd = ' '.join([env_string, executable, target_command])
+ else:
+ script_cmd = ' '.join([env_string, target_command])
+
+ script_cmd = self._connection._shell.wrap_for_exec(script_cmd)
+
+ exec_data = None
+ # PowerShell runs the script in a special wrapper to enable things
+ # like become and environment args
+ if getattr(self._connection._shell, "_IS_WINDOWS", False):
+ # FUTURE: use a more public method to get the exec payload
+ pc = self._play_context
+ exec_data = ps_manifest._create_powershell_wrapper(
+ to_bytes(script_cmd), source, {}, env_dict, self._task.async_val,
+ pc.become, pc.become_method, pc.become_user,
+ pc.become_pass, pc.become_flags, "script", task_vars, None
+ )
+ # build the necessary exec wrapper command
+ # FUTURE: this still doesn't let script work on Windows with non-pipelined connections or
+ # full manual exec of KEEP_REMOTE_FILES
+ script_cmd = self._connection._shell.build_module_command(env_string='', shebang='#!powershell', cmd='')
+
+ result.update(self._low_level_execute_command(cmd=script_cmd, in_data=exec_data, sudoable=True, chdir=chdir))
+
+ if 'rc' in result and result['rc'] != 0:
+ raise AnsibleActionFail('non-zero return code')
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
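
The split-then-replace step above is what preserves argument quoting: shlex.split() is used only to isolate the script path (the first token), and the raw parameter string is then rewritten with the remote tmp path, leaving the remaining quoting untouched. A standalone illustration with hypothetical paths:

    import shlex

    raw_params = '"/local/my script.sh" --msg "hello world"'
    parts = shlex.split(raw_params)        # ['/local/my script.sh', '--msg', 'hello world']
    tmp_src = '/remote/tmp/my script.sh'   # hypothetical remote copy of the script

    # replace only the script path; the rest of the raw string keeps its quotes
    target_command = raw_params.strip().replace(parts[0], tmp_src)
    print(target_command)                  # "/remote/tmp/my script.sh" --msg "hello world"
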
diff --git a/lib/ansible/plugins/action/service.py b/lib/ansible/plugins/action/service.py
new file mode 100644
index 0000000..c061687
--- /dev/null
+++ b/lib/ansible/plugins/action/service.py
@@ -0,0 +1,103 @@
+# (c) 2015, Ansible Inc,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.errors import AnsibleAction, AnsibleActionFail
+from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+
+ UNUSED_PARAMS = {
+ 'systemd': ['pattern', 'runlevel', 'sleep', 'arguments', 'args'],
+ }
+
+ # HACK: list of unqualified service manager names that are/were built-in, we'll prefix these with `ansible.legacy` to
+ # avoid collisions with collections search
+ BUILTIN_SVC_MGR_MODULES = set(['openwrt_init', 'service', 'systemd', 'sysvinit'])
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for service operations '''
+
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ module = self._task.args.get('use', 'auto').lower()
+
+ if module == 'auto':
+ try:
+ if self._task.delegate_to: # if we delegate, we should use delegated host's facts
+ module = self._templar.template("{{hostvars['%s']['ansible_facts']['service_mgr']}}" % self._task.delegate_to)
+ else:
+ module = self._templar.template('{{ansible_facts.service_mgr}}')
+ except Exception:
+ pass # could not get it from template!
+
+ try:
+ if module == 'auto':
+ facts = self._execute_module(
+ module_name='ansible.legacy.setup',
+ module_args=dict(gather_subset='!all', filter='ansible_service_mgr'), task_vars=task_vars)
+ self._display.debug("Facts %s" % facts)
+ module = facts.get('ansible_facts', {}).get('ansible_service_mgr', 'auto')
+
+ if not module or module == 'auto' or not self._shared_loader_obj.module_loader.has_plugin(module):
+ module = 'ansible.legacy.service'
+
+ if module != 'auto':
+ # run the 'service' module
+ new_module_args = self._task.args.copy()
+ if 'use' in new_module_args:
+ del new_module_args['use']
+
+ if module in self.UNUSED_PARAMS:
+ for unused in self.UNUSED_PARAMS[module]:
+ if unused in new_module_args:
+ del new_module_args[unused]
+ self._display.warning('Ignoring "%s" as it is not used in "%s"' % (unused, module))
+
+ # get defaults for specific module
+ context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
+ new_module_args = get_action_args_with_defaults(
+ context.resolved_fqcn, new_module_args, self._task.module_defaults, self._templar,
+ action_groups=self._task._parent._play._action_groups
+ )
+
+ # prefix known built-in modules with 'ansible.legacy' to avoid collisions from collections search, while still allowing library/ overrides
+ if module in self.BUILTIN_SVC_MGR_MODULES:
+ module = 'ansible.legacy.' + module
+
+ self._display.vvvv("Running %s" % module)
+ result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
+ else:
+ raise AnsibleActionFail('Could not detect which service manager to use. Try gathering facts or setting the "use" option.')
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ if not self._task.async_val:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
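
The dispatch logic above degrades in stages: an explicit `use:` wins, then the templated service_mgr fact, then a minimal setup run, and finally the generic service module; built-in names are prefixed with ansible.legacy. A condensed sketch of that chain, with the fact lookup stubbed out in place of templating and the setup run:

    BUILTIN = {'openwrt_init', 'service', 'systemd', 'sysvinit'}

    def pick_service_module(task_args, facts):
        module = task_args.get('use', 'auto').lower()
        if module == 'auto':
            module = facts.get('service_mgr', 'auto')  # stands in for templating/setup
        if not module or module == 'auto':
            module = 'service'                         # last-resort generic module
        if module in BUILTIN:
            module = 'ansible.legacy.' + module        # dodge collection-search collisions
        return module

    print(pick_service_module({}, {'service_mgr': 'systemd'}))  # ansible.legacy.systemd
    print(pick_service_module({'use': 'sysvinit'}, {}))         # ansible.legacy.sysvinit
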
diff --git a/lib/ansible/plugins/action/set_fact.py b/lib/ansible/plugins/action/set_fact.py
new file mode 100644
index 0000000..ae92de8
--- /dev/null
+++ b/lib/ansible/plugins/action/set_fact.py
@@ -0,0 +1,68 @@
+# Copyright 2013 Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleActionFail
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import isidentifier
+
+import ansible.constants as C
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ facts = {}
+ cacheable = boolean(self._task.args.pop('cacheable', False))
+
+ if self._task.args:
+ for (k, v) in self._task.args.items():
+ k = self._templar.template(k)
+
+ if not isidentifier(k):
+ raise AnsibleActionFail("The variable name '%s' is not valid. Variables must start with a letter or underscore character, "
+ "and contain only letters, numbers and underscores." % k)
+
+ # NOTE: this should really use BOOLEANS from convert_bool, but only in the k=v case;
+ # right now it also converts matching explicit YAML strings when 'jinja2_native' is disabled.
+ if not C.DEFAULT_JINJA2_NATIVE and isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
+ v = boolean(v, strict=False)
+ facts[k] = v
+ else:
+ raise AnsibleActionFail('No key/value pairs provided, at least one is required for this action to succeed')
+
+ if facts:
+ # just as _facts actions, we don't set changed=true as we are not modifying the actual host
+ result['ansible_facts'] = facts
+ result['_ansible_facts_cacheable'] = cacheable
+ else:
+ # this should not happen, but just in case we get here
+ raise AnsibleActionFail('Unable to create any variables with provided arguments')
+
+ return result
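
The string-to-bool conversion above is why, outside Jinja2 native mode, `set_fact: enabled=yes` stores a real boolean rather than the string 'yes'. A standalone sketch of that normalization; the boolean() helper here is a simplified stand-in for ansible.module_utils.parsing.convert_bool.boolean:

    def boolean(value, strict=True):
        # simplified stand-in for convert_bool.boolean
        return str(value).lower() in ('true', 'yes', 'on', '1')

    def normalize_fact(value, jinja2_native=False):
        if not jinja2_native and isinstance(value, str) and value.lower() in ('true', 'false', 'yes', 'no'):
            return boolean(value, strict=False)
        return value

    print(normalize_fact('yes'))                      # True (converted)
    print(normalize_fact('yes', jinja2_native=True))  # 'yes' (kept as a string)
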
diff --git a/lib/ansible/plugins/action/set_stats.py b/lib/ansible/plugins/action/set_stats.py
new file mode 100644
index 0000000..9d429ce
--- /dev/null
+++ b/lib/ansible/plugins/action/set_stats.py
@@ -0,0 +1,77 @@
+# Copyright 2016 Ansible (RedHat, Inc)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import isidentifier
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('aggregate', 'data', 'per_host'))
+
+ # TODO: document this in non-empty set_stats.py module
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ stats = {'data': {}, 'per_host': False, 'aggregate': True}
+
+ if self._task.args:
+ data = self._task.args.get('data', {})
+
+ if not isinstance(data, dict):
+ data = self._templar.template(data, convert_bare=False, fail_on_undefined=True)
+
+ if not isinstance(data, dict):
+ result['failed'] = True
+ result['msg'] = "The 'data' option needs to be a dictionary/hash"
+ return result
+
+ # set boolean options, defaults are set above in stats init
+ for opt in ['per_host', 'aggregate']:
+ val = self._task.args.get(opt, None)
+ if val is not None:
+ if not isinstance(val, bool):
+ stats[opt] = boolean(self._templar.template(val), strict=False)
+ else:
+ stats[opt] = val
+
+ for (k, v) in data.items():
+
+ k = self._templar.template(k)
+
+ if not isidentifier(k):
+ result['failed'] = True
+ result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
+ "letters, numbers and underscores." % k)
+ return result
+
+ stats['data'][k] = self._templar.template(v)
+
+ result['changed'] = False
+ result['ansible_stats'] = stats
+
+ return result
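
Both set_fact and set_stats funnel variable names through isidentifier(). A rough standard-library equivalent of that check (the real helper in ansible.utils.vars also rejects Python keywords, which str.isidentifier() alone does not):

    import keyword

    def isidentifier_sketch(name):
        # approximation of ansible.utils.vars.isidentifier
        return isinstance(name, str) and name.isidentifier() and not keyword.iskeyword(name)

    print(isidentifier_sketch('build_id'))  # True
    print(isidentifier_sketch('2fast'))     # False: starts with a digit
    print(isidentifier_sketch('for'))       # False: Python keyword
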
diff --git a/lib/ansible/plugins/action/shell.py b/lib/ansible/plugins/action/shell.py
new file mode 100644
index 0000000..617a373
--- /dev/null
+++ b/lib/ansible/plugins/action/shell.py
@@ -0,0 +1,27 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+ del tmp # tmp no longer has any effect
+
+ # Shell module is implemented via command with a special arg
+ self._task.args['_uses_shell'] = True
+
+ command_action = self._shared_loader_obj.action_loader.get('ansible.legacy.command',
+ task=self._task,
+ connection=self._connection,
+ play_context=self._play_context,
+ loader=self._loader,
+ templar=self._templar,
+ shared_loader_obj=self._shared_loader_obj)
+ result = command_action.run(task_vars=task_vars)
+
+ return result
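
shell is deliberately a thin shim: it sets one private arg and delegates everything else to the command action. A toy sketch of the same delegation pattern (the classes are stand-ins, not the real plugin API):

    class CommandAction:
        def run(self, args):
            return {'cmd': args.get('_raw_params'), 'shell': args.get('_uses_shell', False)}

    class ShellAction:
        def run(self, args):
            args['_uses_shell'] = True      # the only thing shell adds
            return CommandAction().run(args)

    print(ShellAction().run({'_raw_params': 'echo $HOME'}))
    # {'cmd': 'echo $HOME', 'shell': True}
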
diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py
new file mode 100644
index 0000000..d2b3df9
--- /dev/null
+++ b/lib/ansible/plugins/action/template.py
@@ -0,0 +1,190 @@
+# Copyright: (c) 2015, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import shutil
+import stat
+import tempfile
+
+from ansible import constants as C
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils._text import to_bytes, to_text, to_native
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.plugins.action import ActionBase
+from ansible.template import generate_ansible_template_vars, AnsibleEnvironment
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+ DEFAULT_NEWLINE_SEQUENCE = "\n"
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for template operations '''
+
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ # Options type validation
+ # strings
+ for s_type in ('src', 'dest', 'state', 'newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
+ 'block_end_string', 'comment_start_string', 'comment_end_string'):
+ if s_type in self._task.args:
+ value = ensure_type(self._task.args[s_type], 'string')
+ if value is not None and not isinstance(value, string_types):
+ raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
+ self._task.args[s_type] = value
+
+ # booleans
+ try:
+ follow = boolean(self._task.args.get('follow', False), strict=False)
+ trim_blocks = boolean(self._task.args.get('trim_blocks', True), strict=False)
+ lstrip_blocks = boolean(self._task.args.get('lstrip_blocks', False), strict=False)
+ except TypeError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ # assign to local vars for ease of use
+ source = self._task.args.get('src', None)
+ dest = self._task.args.get('dest', None)
+ state = self._task.args.get('state', None)
+ newline_sequence = self._task.args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
+ variable_start_string = self._task.args.get('variable_start_string', None)
+ variable_end_string = self._task.args.get('variable_end_string', None)
+ block_start_string = self._task.args.get('block_start_string', None)
+ block_end_string = self._task.args.get('block_end_string', None)
+ comment_start_string = self._task.args.get('comment_start_string', None)
+ comment_end_string = self._task.args.get('comment_end_string', None)
+ output_encoding = self._task.args.get('output_encoding', 'utf-8') or 'utf-8'
+
+ wrong_sequences = ["\\n", "\\r", "\\r\\n"]
+ allowed_sequences = ["\n", "\r", "\r\n"]
+
+ # We need to convert unescaped sequences to proper escaped sequences for Jinja2
+ if newline_sequence in wrong_sequences:
+ newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
+
+ try:
+ # logical validation
+ if state is not None:
+ raise AnsibleActionFail("'state' cannot be specified on a template")
+ elif source is None or dest is None:
+ raise AnsibleActionFail("src and dest are required")
+ elif newline_sequence not in allowed_sequences:
+ raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")
+ else:
+ try:
+ source = self._find_needle('templates', source)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ mode = self._task.args.get('mode', None)
+ if mode == 'preserve':
+ mode = '0%03o' % stat.S_IMODE(os.stat(source).st_mode)
+
+ # Get vault decrypted tmp file
+ try:
+ tmp_source = self._loader.get_real_file(source)
+ except AnsibleFileNotFound as e:
+ raise AnsibleActionFail("could not find src=%s, %s" % (source, to_text(e)))
+ b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')
+
+ # template the source data locally & get ready to transfer
+ try:
+ with open(b_tmp_source, 'rb') as f:
+ try:
+ template_data = to_text(f.read(), errors='surrogate_or_strict')
+ except UnicodeError:
+ raise AnsibleActionFail("Template source files must be utf-8 encoded")
+
+ # set jinja2 internal search path for includes
+ searchpath = task_vars.get('ansible_search_path', [])
+ searchpath.extend([self._loader._basedir, os.path.dirname(source)])
+
+ # We want to search into the 'templates' subdir of each search path in
+ # addition to our original search paths.
+ newsearchpath = []
+ for p in searchpath:
+ newsearchpath.append(os.path.join(p, 'templates'))
+ newsearchpath.append(p)
+ searchpath = newsearchpath
+
+ # add ansible 'template' vars
+ temp_vars = task_vars.copy()
+ # NOTE in the case of ANSIBLE_DEBUG=1 task_vars is VarsWithSources(MutableMapping)
+ # so | operator cannot be used as it can be used only on dicts
+ # https://peps.python.org/pep-0584/#what-about-mapping-and-mutablemapping
+ temp_vars.update(generate_ansible_template_vars(self._task.args.get('src', None), source, dest))
+
+ # force templar to use AnsibleEnvironment to prevent issues with native types
+ # https://github.com/ansible/ansible/issues/46169
+ templar = self._templar.copy_with_new_env(environment_class=AnsibleEnvironment,
+ searchpath=searchpath,
+ newline_sequence=newline_sequence,
+ block_start_string=block_start_string,
+ block_end_string=block_end_string,
+ variable_start_string=variable_start_string,
+ variable_end_string=variable_end_string,
+ comment_start_string=comment_start_string,
+ comment_end_string=comment_end_string,
+ trim_blocks=trim_blocks,
+ lstrip_blocks=lstrip_blocks,
+ available_variables=temp_vars)
+ resultant = templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
+ except AnsibleAction:
+ raise
+ except Exception as e:
+ raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
+ finally:
+ self._loader.cleanup_tmp_file(b_tmp_source)
+
+ new_task = self._task.copy()
+ # mode is either the mode from task.args or the mode of the source file if the task.args
+ # mode == 'preserve'
+ new_task.args['mode'] = mode
+
+ # remove 'template only' options:
+ for remove in ('newline_sequence', 'block_start_string', 'block_end_string', 'variable_start_string', 'variable_end_string',
+ 'comment_start_string', 'comment_end_string', 'trim_blocks', 'lstrip_blocks', 'output_encoding'):
+ new_task.args.pop(remove, None)
+
+ local_tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
+
+ try:
+ result_file = os.path.join(local_tempdir, os.path.basename(source))
+ with open(to_bytes(result_file, errors='surrogate_or_strict'), 'wb') as f:
+ f.write(to_bytes(resultant, encoding=output_encoding, errors='surrogate_or_strict'))
+
+ new_task.args.update(
+ dict(
+ src=result_file,
+ dest=dest,
+ follow=follow,
+ ),
+ )
+ # call with ansible.legacy prefix to eliminate collisions with collections while still allowing local override
+ copy_action = self._shared_loader_obj.action_loader.get('ansible.legacy.copy',
+ task=new_task,
+ connection=self._connection,
+ play_context=self._play_context,
+ loader=self._loader,
+ templar=self._templar,
+ shared_loader_obj=self._shared_loader_obj)
+ result.update(copy_action.run(task_vars=task_vars))
+ finally:
+ shutil.rmtree(to_bytes(local_tempdir, errors='surrogate_or_strict'))
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
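
The search-path expansion above doubles each entry so Jinja2 includes are resolved from a 'templates' subdirectory before the directory itself. A standalone sketch of that transformation:

    import os

    searchpath = ['/playbook', '/role/tasks']
    newsearchpath = []
    for p in searchpath:
        newsearchpath.append(os.path.join(p, 'templates'))
        newsearchpath.append(p)

    print(newsearchpath)
    # ['/playbook/templates', '/playbook', '/role/tasks/templates', '/role/tasks']
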
diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py
new file mode 100644
index 0000000..4d188e3
--- /dev/null
+++ b/lib/ansible/plugins/action/unarchive.py
@@ -0,0 +1,111 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleActionSkip
+from ansible.module_utils._text import to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for unarchive operations '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ source = self._task.args.get('src', None)
+ dest = self._task.args.get('dest', None)
+ remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
+ creates = self._task.args.get('creates', None)
+ decrypt = self._task.args.get('decrypt', True)
+
+ try:
+ # "copy" is deprecated in favor of "remote_src".
+ if 'copy' in self._task.args:
+ # They are mutually exclusive.
+ if 'remote_src' in self._task.args:
+ raise AnsibleActionFail("parameters are mutually exclusive: ('copy', 'remote_src')")
+ # We will take the information from copy and store it in
+ # the remote_src var to use later in this file.
+ self._task.args['remote_src'] = remote_src = not boolean(self._task.args.pop('copy'), strict=False)
+
+ if source is None or dest is None:
+ raise AnsibleActionFail("src (or content) and dest are required")
+
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ creates = self._remote_expand_user(creates)
+ if self._remote_file_exists(creates):
+ raise AnsibleActionSkip("skipped, since %s exists" % creates)
+
+ dest = self._remote_expand_user(dest) # CCTODO: Fix path for Windows hosts.
+ source = os.path.expanduser(source)
+
+ if not remote_src:
+ try:
+ source = self._loader.get_real_file(self._find_needle('files', source), decrypt=decrypt)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ try:
+ remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ if not remote_stat['exists'] or not remote_stat['isdir']:
+ raise AnsibleActionFail("dest '%s' must be an existing dir" % dest)
+
+ if not remote_src:
+ # transfer the file to a remote tmp location
+ tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source')
+ self._transfer_file(source, tmp_src)
+
+ # handle diff mode client side
+ # handle check mode client side
+
+ # remove action plugin only keys
+ new_module_args = self._task.args.copy()
+ for key in ('decrypt',):
+ if key in new_module_args:
+ del new_module_args[key]
+
+ if not remote_src:
+ # fix file permissions when the copy is done as a different user
+ self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
+ new_module_args['src'] = tmp_src
+
+ # execute the unarchive module now, with the updated args (using the ansible.legacy prefix to eliminate
+ # collisions with collections while still allowing local override)
+ result.update(self._execute_module(module_name='ansible.legacy.unarchive', module_args=new_module_args, task_vars=task_vars))
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+ return result
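
The deprecated `copy` option is folded into `remote_src` by negation: copy=no meant "the archive already lives on the remote", which is exactly remote_src=yes. A minimal sketch of that translation (using plain bools in place of the boolean() coercion):

    def translate_copy(task_args):
        if 'copy' in task_args:
            if 'remote_src' in task_args:
                raise ValueError("parameters are mutually exclusive: ('copy', 'remote_src')")
            task_args['remote_src'] = not task_args.pop('copy')
        return task_args

    print(translate_copy({'copy': False, 'src': 'a.tgz'}))  # remote_src=True
    print(translate_copy({'copy': True, 'src': 'a.tgz'}))   # remote_src=False
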
diff --git a/lib/ansible/plugins/action/uri.py b/lib/ansible/plugins/action/uri.py
new file mode 100644
index 0000000..bbaf092
--- /dev/null
+++ b/lib/ansible/plugins/action/uri.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
+# (c) 2018, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.collections import Mapping, MutableMapping
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import text_type
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_async = True
+
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ body_format = self._task.args.get('body_format', 'raw')
+ body = self._task.args.get('body')
+ src = self._task.args.get('src', None)
+ remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False)
+
+ try:
+ if remote_src:
+ # everything is remote, so we just execute the module
+ # without changing any of the module arguments
+ # call with ansible.legacy prefix to prevent collections collisions while allowing local override
+ raise _AnsibleActionDone(result=self._execute_module(module_name='ansible.legacy.uri',
+ task_vars=task_vars, wrap_async=self._task.async_val))
+
+ kwargs = {}
+
+ if src:
+ try:
+ src = self._find_needle('files', src)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, os.path.basename(src))
+ kwargs['src'] = tmp_src
+ self._transfer_file(src, tmp_src)
+ self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
+ elif body_format == 'form-multipart':
+ if not isinstance(body, Mapping):
+ raise AnsibleActionFail(
+ 'body must be mapping, cannot be type %s' % body.__class__.__name__
+ )
+ for field, value in body.items():
+ if not isinstance(value, MutableMapping):
+ continue
+ content = value.get('content')
+ filename = value.get('filename')
+ if not filename or content:
+ continue
+
+ try:
+ filename = self._find_needle('files', filename)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ tmp_src = self._connection._shell.join_path(
+ self._connection._shell.tmpdir,
+ os.path.basename(filename)
+ )
+ value['filename'] = tmp_src
+ self._transfer_file(filename, tmp_src)
+ self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
+ kwargs['body'] = body
+
+ new_module_args = self._task.args | kwargs
+
+ # call with ansible.legacy prefix to prevent collections collisions while allowing local override
+ result.update(self._execute_module('ansible.legacy.uri', module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ if not self._task.async_val:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+ return result
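
For form-multipart bodies, only the fields that name a file and carry no inline content are rewritten to point at the transferred tmp copy; plain string fields and inline content pass through untouched. A condensed sketch of that filter with hypothetical paths:

    body = {
        'notes': {'content': 'inline text', 'filename': 'ignored.txt'},
        'upload': {'filename': 'report.pdf'},
        'token': 'plain-string-field',
    }

    for value in body.values():
        if not isinstance(value, dict):                       # MutableMapping in the real code
            continue
        if not value.get('filename') or value.get('content'):
            continue                                          # nothing to transfer
        value['filename'] = '/remote/tmp/' + value['filename']  # stands in for _transfer_file

    print(body['upload'])  # {'filename': '/remote/tmp/report.pdf'}
    print(body['notes'])   # untouched: content is supplied inline
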
diff --git a/lib/ansible/plugins/action/validate_argument_spec.py b/lib/ansible/plugins/action/validate_argument_spec.py
new file mode 100644
index 0000000..dc7d6cb
--- /dev/null
+++ b/lib/ansible/plugins/action/validate_argument_spec.py
@@ -0,0 +1,94 @@
+# Copyright 2021 Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
+from ansible.module_utils.errors import AnsibleValidationErrorMultiple
+from ansible.utils.vars import combine_vars
+
+
+class ActionModule(ActionBase):
+ ''' Validate an arg spec'''
+
+ TRANSFERS_FILES = False
+
+ def get_args_from_task_vars(self, argument_spec, task_vars):
+ '''
+ Get any arguments that may come from `task_vars`.
+
+ Expand templated variables so we can validate the actual values.
+
+ :param argument_spec: A dict of the argument spec.
+ :param task_vars: A dict of task variables.
+
+ :returns: A dict of values that can be validated against the arg spec.
+ '''
+ args = {}
+
+ for argument_name, argument_attrs in argument_spec.items():
+ if argument_name in task_vars:
+ args[argument_name] = task_vars[argument_name]
+ args = self._templar.template(args)
+ return args
+
+ def run(self, tmp=None, task_vars=None):
+ '''
+ Validate an argument specification against a provided set of data.
+
+ The `validate_argument_spec` module expects to receive the arguments:
+ - argument_spec: A dict whose keys are the valid argument names, and
+ whose values are dicts of the argument attributes (type, etc).
+ - provided_arguments: A dict whose keys are the argument names, and
+ whose values are the argument value.
+
+ :param tmp: Deprecated. Do not use.
+ :param task_vars: A dict of task variables.
+ :return: An action result dict, including an 'argument_errors' key with a
+ list of validation errors found.
+ '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ # This action can be called from anywhere, so pass in some info about what it is
+ # validating args for so the error results make some sense
+ result['validate_args_context'] = self._task.args.get('validate_args_context', {})
+
+ if 'argument_spec' not in self._task.args:
+ raise AnsibleError('"argument_spec" arg is required in args: %s' % self._task.args)
+
+ # Get the task var called argument_spec. This will contain the arg spec
+ # data dict (for the proper entry point for a role).
+ argument_spec_data = self._task.args.get('argument_spec')
+
+ # the values that were passed in and will be checked against argument_spec
+ provided_arguments = self._task.args.get('provided_arguments', {})
+
+ if not isinstance(argument_spec_data, dict):
+ raise AnsibleError('Incorrect type for argument_spec, expected dict and got %s' % type(argument_spec_data))
+
+ if not isinstance(provided_arguments, dict):
+ raise AnsibleError('Incorrect type for provided_arguments, expected dict and got %s' % type(provided_arguments))
+
+ args_from_vars = self.get_args_from_task_vars(argument_spec_data, task_vars)
+ validator = ArgumentSpecValidator(argument_spec_data)
+ validation_result = validator.validate(combine_vars(args_from_vars, provided_arguments), validate_role_argument_spec=True)
+
+ if validation_result.error_messages:
+ result['failed'] = True
+ result['msg'] = 'Validation of arguments failed:\n%s' % '\n'.join(validation_result.error_messages)
+ result['argument_spec_data'] = argument_spec_data
+ result['argument_errors'] = validation_result.error_messages
+ return result
+
+ result['changed'] = False
+ result['msg'] = 'The arg spec validation passed'
+
+ return result
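
ArgumentSpecValidator is usable on its own; a small sketch of the same validate-and-report flow outside the plugin (the spec and values here are made up):

    from ansible.module_utils.common.arg_spec import ArgumentSpecValidator

    spec = {'port': {'type': 'int', 'required': True}}
    validator = ArgumentSpecValidator(spec)
    result = validator.validate({'port': 'not-a-number'})

    if result.error_messages:
        print('Validation of arguments failed:\n%s' % '\n'.join(result.error_messages))
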
diff --git a/lib/ansible/plugins/action/wait_for_connection.py b/lib/ansible/plugins/action/wait_for_connection.py
new file mode 100644
index 0000000..8489c76
--- /dev/null
+++ b/lib/ansible/plugins/action/wait_for_connection.py
@@ -0,0 +1,120 @@
+# (c) 2017, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# CI-required python3 boilerplate
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import time
+from datetime import datetime, timedelta
+
+from ansible.module_utils._text import to_text
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class TimedOutException(Exception):
+ pass
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('connect_timeout', 'delay', 'sleep', 'timeout'))
+
+ DEFAULT_CONNECT_TIMEOUT = 5
+ DEFAULT_DELAY = 0
+ DEFAULT_SLEEP = 1
+ DEFAULT_TIMEOUT = 600
+
+ def do_until_success_or_timeout(self, what, timeout, connect_timeout, what_desc, sleep=1):
+ max_end_time = datetime.utcnow() + timedelta(seconds=timeout)
+
+ error = None
+ while datetime.utcnow() < max_end_time:
+ try:
+ what(connect_timeout)
+ if what_desc:
+ display.debug("wait_for_connection: %s success" % what_desc)
+ return
+ except Exception as e:
+ error = e # PY3 compatibility to store exception for use outside of this block
+ if what_desc:
+ display.debug("wait_for_connection: %s fail (expected), retrying in %d seconds..." % (what_desc, sleep))
+ time.sleep(sleep)
+
+ raise TimedOutException("timed out waiting for %s: %s" % (what_desc, error))
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
+ delay = int(self._task.args.get('delay', self.DEFAULT_DELAY))
+ sleep = int(self._task.args.get('sleep', self.DEFAULT_SLEEP))
+ timeout = int(self._task.args.get('timeout', self.DEFAULT_TIMEOUT))
+
+ if self._play_context.check_mode:
+ display.vvv("wait_for_connection: skipping for check_mode")
+ return dict(skipped=True)
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ def ping_module_test(connect_timeout):
+ ''' Test ping module, if available '''
+ display.vvv("wait_for_connection: attempting ping module test")
+ # re-run interpreter discovery if we ran it in the first iteration
+ if self._discovered_interpreter_key:
+ task_vars['ansible_facts'].pop(self._discovered_interpreter_key, None)
+ # call connection reset between runs if it's there
+ try:
+ self._connection.reset()
+ except AttributeError:
+ pass
+
+ ping_result = self._execute_module(module_name='ansible.legacy.ping', module_args=dict(), task_vars=task_vars)
+
+ # Test module output
+ if ping_result['ping'] != 'pong':
+ raise Exception('ping test failed')
+
+ start = datetime.now()
+
+ if delay:
+ time.sleep(delay)
+
+ try:
+ # If the connection has a transport_test method, use it first
+ if hasattr(self._connection, 'transport_test'):
+ self.do_until_success_or_timeout(self._connection.transport_test, timeout, connect_timeout, what_desc="connection port up", sleep=sleep)
+
+ # Use the ping module test to determine end-to-end connectivity
+ self.do_until_success_or_timeout(ping_module_test, timeout, connect_timeout, what_desc="ping module test", sleep=sleep)
+
+ except TimedOutException as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+
+ elapsed = datetime.now() - start
+ result['elapsed'] = elapsed.seconds
+
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
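
do_until_success_or_timeout is a plain poll loop: run the probe, return on the first success, otherwise sleep and retry until the deadline. A self-contained sketch with a stub probe that comes up on its third attempt:

    import time
    from datetime import datetime, timedelta

    def wait_until(probe, timeout, sleep=1):
        deadline = datetime.utcnow() + timedelta(seconds=timeout)
        error = None
        while datetime.utcnow() < deadline:
            try:
                probe()
                return
            except Exception as exc:
                error = exc
                time.sleep(sleep)
        raise TimeoutError('timed out waiting for probe: %s' % error)

    calls = {'n': 0}
    def probe():
        calls['n'] += 1
        if calls['n'] < 3:
            raise ConnectionError('not up yet')

    wait_until(probe, timeout=30, sleep=0)  # succeeds on the third attempt
    print(calls['n'])                       # 3
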
diff --git a/lib/ansible/plugins/action/yum.py b/lib/ansible/plugins/action/yum.py
new file mode 100644
index 0000000..d90a9e0
--- /dev/null
+++ b/lib/ansible/plugins/action/yum.py
@@ -0,0 +1,109 @@
+# (c) 2018, Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleActionFail
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+VALID_BACKENDS = frozenset(('yum', 'yum4', 'dnf'))
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=None):
+ '''
+ Action plugin handler for yum3 vs yum4(dnf) operations.
+
+ Enables the yum module to use either yum3 or yum4. Yum4 is a yum
+ command-line compatibility layer on top of dnf. Since the Ansible
+ modules for yum (aka yum3) and dnf (aka yum4) each call their backend's
+ Python API natively, we need to detect the backend here and pass off to
+ the correct Ansible module to execute on the remote system.
+ '''
+
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ # Carry-over concept from the package action plugin
+ if 'use' in self._task.args and 'use_backend' in self._task.args:
+ raise AnsibleActionFail("parameters are mutually exclusive: ('use', 'use_backend')")
+
+ module = self._task.args.get('use', self._task.args.get('use_backend', 'auto'))
+
+ if module == 'auto':
+ try:
+ if self._task.delegate_to: # if we delegate, we should use delegated host's facts
+ module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
+ else:
+ module = self._templar.template("{{ansible_facts.pkg_mgr}}")
+ except Exception:
+ pass # could not get it from template!
+
+ if module not in VALID_BACKENDS:
+ facts = self._execute_module(
+ module_name="ansible.legacy.setup", module_args=dict(filter="ansible_pkg_mgr", gather_subset="!all"),
+ task_vars=task_vars)
+ display.debug("Facts %s" % facts)
+ module = facts.get("ansible_facts", {}).get("ansible_pkg_mgr", "auto")
+ if (not self._task.delegate_to or self._task.delegate_facts) and module != 'auto':
+ result['ansible_facts'] = {'pkg_mgr': module}
+
+ if module not in VALID_BACKENDS:
+ result.update(
+ {
+ 'failed': True,
+ 'msg': ("Could not detect which major revision of yum is in use, which is required to determine module backend.",
+ "You should manually specify use_backend to tell the module whether to use the yum (yum3) or dnf (yum4) backend})"),
+ }
+ )
+
+ else:
+ if module == "yum4":
+ module = "dnf"
+
+ # eliminate collisions with collections search while still allowing local override
+ module = 'ansible.legacy.' + module
+
+ if not self._shared_loader_obj.module_loader.has_plugin(module):
+ result.update({'failed': True, 'msg': "Could not find a yum module backend for %s." % module})
+ else:
+ # run either the yum (yum3) or dnf (yum4) backend module
+ new_module_args = self._task.args.copy()
+ if 'use_backend' in new_module_args:
+ del new_module_args['use_backend']
+ if 'use' in new_module_args:
+ del new_module_args['use']
+
+ display.vvvv("Running %s as the backend for the yum action plugin" % module)
+ result.update(self._execute_module(
+ module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
+
+ # Cleanup
+ if not self._task.async_val:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
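The plugin's work reduces to resolving a backend name and delegating to that module. A standalone sketch of the selection logic, with a made-up facts dict standing in for the templated ansible_facts.pkg_mgr value and the fallback setup run:

VALID_BACKENDS = frozenset(('yum', 'yum4', 'dnf'))


def resolve_backend(requested, facts):
    module = requested or 'auto'
    if module == 'auto':
        # fall back to the detected package manager, as the plugin does
        module = facts.get('ansible_pkg_mgr', 'auto')
    if module not in VALID_BACKENDS:
        raise ValueError("could not detect a yum backend; specify use_backend explicitly")
    # yum4 is the dnf-based compatibility layer, so run the dnf module
    return 'dnf' if module == 'yum4' else module


print(resolve_backend('auto', {'ansible_pkg_mgr': 'dnf'}))  # dnf
print(resolve_backend('yum4', {}))                          # dnf
print(resolve_backend('yum', {}))                           # yum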
diff --git a/lib/ansible/plugins/become/__init__.py b/lib/ansible/plugins/become/__init__.py
new file mode 100644
index 0000000..9dacf22
--- /dev/null
+++ b/lib/ansible/plugins/become/__init__.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import shlex
+
+from abc import abstractmethod
+from random import choice
+from string import ascii_lowercase
+from gettext import dgettext
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes
+from ansible.plugins import AnsiblePlugin
+
+
+def _gen_id(length=32):
+ ''' return random string used to identify the current privilege escalation '''
+ return ''.join(choice(ascii_lowercase) for x in range(length))
+
+
+class BecomeBase(AnsiblePlugin):
+
+ name = None # type: str | None
+
+ # messages for detecting prompted password issues
+ fail = tuple() # type: tuple[str, ...]
+ missing = tuple() # type: tuple[str, ...]
+
+ # many connection plugins cannot provide a tty; set to True if your become
+ # plugin requires a tty, e.g. su
+ require_tty = False
+
+ # prompt to match
+ prompt = ''
+
+ def __init__(self):
+ super(BecomeBase, self).__init__()
+ self._id = ''
+ self.success = ''
+
+ def get_option(self, option, hostvars=None, playcontext=None):
+ """ Overrides the base get_option to provide a fallback to playcontext vars in case a 3rd party plugin did not
+ implement the base become options required in Ansible. """
+ # TODO: add deprecation warning for ValueError in devel that removes the playcontext fallback
+ try:
+ return super(BecomeBase, self).get_option(option, hostvars=hostvars)
+ except KeyError:
+ pc_fallback = ['become_user', 'become_pass', 'become_flags', 'become_exe']
+ if option not in pc_fallback:
+ raise
+
+ return getattr(playcontext, option, None)
+
+ def expect_prompt(self):
+ """This function assists connection plugins in determining if they need to wait for
+ a prompt. Both a prompt and a password are required.
+ """
+ return self.prompt and self.get_option('become_pass')
+
+ def _build_success_command(self, cmd, shell, noexe=False):
+ if not all((cmd, shell, self.success)):
+ return cmd
+
+ try:
+ cmd = shlex.quote('%s %s %s %s' % (shell.ECHO, self.success, shell.COMMAND_SEP, cmd))
+ except AttributeError:
+ # TODO: This should probably become some more robust functionality used to detect incompat
+ raise AnsibleError('The %s shell family is incompatible with the %s become plugin' % (shell.SHELL_FAMILY, self.name))
+ exe = getattr(shell, 'executable', None)
+ if exe and not noexe:
+ cmd = '%s -c %s' % (exe, cmd)
+ return cmd
+
+ @abstractmethod
+ def build_become_command(self, cmd, shell):
+ self._id = _gen_id()
+ self.success = 'BECOME-SUCCESS-%s' % self._id
+
+ def check_success(self, b_output):
+ b_success = to_bytes(self.success)
+ return any(b_success in l.rstrip() for l in b_output.splitlines(True))
+
+ def check_password_prompt(self, b_output):
+ ''' checks if the expected password prompt exists in b_output '''
+ if self.prompt:
+ b_prompt = to_bytes(self.prompt).strip()
+ return any(l.strip().startswith(b_prompt) for l in b_output.splitlines())
+ return False
+
+ def _check_password_error(self, b_out, msg):
+ ''' returns True if the domain-specific i18n version of msg is found in b_out, else False '''
+ b_fail = to_bytes(dgettext(self.name, msg))
+ return b_fail and b_fail in b_out
+
+ def check_incorrect_password(self, b_output):
+ for errstring in self.fail:
+ if self._check_password_error(b_output, errstring):
+ return True
+ return False
+
+ def check_missing_password(self, b_output):
+ for errstring in self.missing:
+ if self._check_password_error(b_output, errstring):
+ return True
+ return False
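The success-marker handshake is the piece most become plugins share: wrap the real command so a random marker is echoed first, then scan captured output for that marker to confirm escalation succeeded before the command's own output begins. A standalone sketch, with FakeShell as a hypothetical stand-in for an Ansible shell plugin:

import shlex
from random import choice
from string import ascii_lowercase


class FakeShell:
    ECHO = 'echo'
    COMMAND_SEP = ';'
    executable = '/bin/sh'


def build_success_command(cmd, shell, success):
    # quote 'echo MARKER; real-command' and run it through the shell executable
    wrapped = shlex.quote('%s %s %s %s' % (shell.ECHO, success, shell.COMMAND_SEP, cmd))
    return '%s -c %s' % (shell.executable, wrapped)


marker = 'BECOME-SUCCESS-' + ''.join(choice(ascii_lowercase) for _ in range(32))
print(build_success_command('id -u', FakeShell(), marker))

# check_success equivalent: the marker appears in the captured output
output = (marker + '\n0\n').encode()
print(any(marker.encode() in line for line in output.splitlines()))  # True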
diff --git a/lib/ansible/plugins/become/runas.py b/lib/ansible/plugins/become/runas.py
new file mode 100644
index 0000000..0b7d466
--- /dev/null
+++ b/lib/ansible/plugins/become/runas.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: runas
+ short_description: Run As user
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the windows runas facility.
+ author: ansible (@core)
+ version_added: "2.8"
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: runas_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_runas_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_RUNAS_USER
+ keyword:
+ - name: become_user
+ required: True
+ become_flags:
+ description: Options to pass to runas, a space delimited list of k=v pairs
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: runas_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_runas_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_RUNAS_FLAGS
+ keyword:
+ - name: become_flags
+ become_pass:
+ description: password
+ ini:
+ - section: runas_become_plugin
+ key: password
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_runas_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_RUNAS_PASS
+ notes:
+ - runas is really implemented in the powershell module handler and as such can only be used with winrm connections.
+ - This plugin ignores the 'become_exe' setting as it uses an API and not an executable.
+ - The Secondary Logon service (seclogon) must be running to use runas
+"""
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'runas'
+
+ def build_become_command(self, cmd, shell):
+ # this is a noop, the 'real' runas is implemented
+ # inside the windows powershell execution subsystem
+ return cmd
diff --git a/lib/ansible/plugins/become/su.py b/lib/ansible/plugins/become/su.py
new file mode 100644
index 0000000..3a6fdea
--- /dev/null
+++ b/lib/ansible/plugins/become/su.py
@@ -0,0 +1,168 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: su
+ short_description: Substitute User
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the su utility.
+ author: ansible (@core)
+ version_added: "2.8"
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: su_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_su_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SU_USER
+ keyword:
+ - name: become_user
+ become_exe:
+ description: Su executable
+ default: su
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: su_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_su_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_SU_EXE
+ keyword:
+ - name: become_exe
+ become_flags:
+ description: Options to pass to su
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: su_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_su_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SU_FLAGS
+ keyword:
+ - name: become_flags
+ become_pass:
+ description: Password to pass to su
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_su_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SU_PASS
+ ini:
+ - section: su_become_plugin
+ key: password
+ prompt_l10n:
+ description:
+ - List of localized strings to match for prompt detection
+ - If empty we'll use the built in one
+ - Do NOT add a colon (:) to your custom entries. Ansible adds a colon at the end of each prompt;
+ if you add another one in your string, your prompt will fail with a "Timeout" error.
+ default: []
+ type: list
+ elements: string
+ ini:
+ - section: su_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_su_prompt_l10n
+ env:
+ - name: ANSIBLE_SU_PROMPT_L10N
+"""
+
+import re
+import shlex
+
+from ansible.module_utils._text import to_bytes
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'su'
+
+ # messages for detecting prompted password issues
+ fail = ('Authentication failure',)
+
+ SU_PROMPT_LOCALIZATIONS = [
+ 'Password',
+ '암호',
+ 'パスワード',
+ 'Adgangskode',
+ 'Contraseña',
+ 'Contrasenya',
+ 'Hasło',
+ 'Heslo',
+ 'Jelszó',
+ 'Lösenord',
+ 'Mật khẩu',
+ 'Mot de passe',
+ 'Parola',
+ 'Parool',
+ 'Pasahitza',
+ 'Passord',
+ 'Passwort',
+ 'Salasana',
+ 'Sandi',
+ 'Senha',
+ 'Wachtwoord',
+ 'ססמה',
+ 'Лозинка',
+ 'Парола',
+ 'Пароль',
+ 'गुप्तशब्द',
+ 'शब्दकूट',
+ 'సంకేతపదము',
+ 'හස්පදය',
+ '密码',
+ '密碼',
+ '口令',
+ ]
+
+ def check_password_prompt(self, b_output):
+ ''' checks if the expected password prompt exists in b_output '''
+
+ prompts = self.get_option('prompt_l10n') or self.SU_PROMPT_LOCALIZATIONS
+ b_password_string = b"|".join((br'(\w+\'s )?' + to_bytes(p)) for p in prompts)
+ # Colon or unicode fullwidth colon
+ b_password_string = b_password_string + to_bytes(u' ?(:|：) ?')
+ b_su_prompt_localizations_re = re.compile(b_password_string, flags=re.IGNORECASE)
+ return bool(b_su_prompt_localizations_re.match(b_output))
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ # Prompt handling for ``su`` is more complicated, this
+ # is used to satisfy the connection plugin
+ self.prompt = True
+
+ if not cmd:
+ return cmd
+
+ exe = self.get_option('become_exe') or self.name
+ flags = self.get_option('become_flags') or ''
+ user = self.get_option('become_user') or ''
+ success_cmd = self._build_success_command(cmd, shell)
+
+ return "%s %s %s -c %s" % (exe, flags, user, shlex.quote(success_cmd))
diff --git a/lib/ansible/plugins/become/sudo.py b/lib/ansible/plugins/become/sudo.py
new file mode 100644
index 0000000..fb285f0
--- /dev/null
+++ b/lib/ansible/plugins/become/sudo.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: sudo
+ short_description: Substitute User DO
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the sudo utility.
+ author: ansible (@core)
+ version_added: "2.8"
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: sudo_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_sudo_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SUDO_USER
+ keyword:
+ - name: become_user
+ become_exe:
+ description: Sudo executable
+ default: sudo
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: sudo_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_sudo_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_SUDO_EXE
+ keyword:
+ - name: become_exe
+ become_flags:
+ description: Options to pass to sudo
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: sudo_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_sudo_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SUDO_FLAGS
+ keyword:
+ - name: become_flags
+ become_pass:
+ description: Password to pass to sudo
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_sudo_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SUDO_PASS
+ ini:
+ - section: sudo_become_plugin
+ key: password
+"""
+
+import re
+import shlex
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'sudo'
+
+ # messages for detecting prompted password issues
+ fail = ('Sorry, try again.',)
+ missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required')
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ becomecmd = self.get_option('become_exe') or self.name
+
+ flags = self.get_option('become_flags') or ''
+ prompt = ''
+ if self.get_option('become_pass'):
+ self.prompt = '[sudo via ansible, key=%s] password:' % self._id
+ if flags: # this could be simplified, but kept as is for now for backwards string matching
+ reflag = []
+ for flag in shlex.split(flags):
+ if flag in ('-n', '--non-interactive'):
+ continue
+ elif not flag.startswith('--'):
+ # handle -XnxxX flags only
+ flag = re.sub(r'^(-\w*)n(\w*.*)', r'\1\2', flag)
+ reflag.append(flag)
+ flags = shlex.join(reflag)
+
+ prompt = '-p "%s"' % (self.prompt)
+
+ user = self.get_option('become_user') or ''
+ if user:
+ user = '-u %s' % (user)
+
+ return ' '.join([becomecmd, flags, prompt, user, self._build_success_command(cmd, shell)])
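The flag rewrite above exists because sudo's non-interactive flags would make it exit instead of prompting once Ansible decides to feed it a password. A standalone sketch of the same stripping logic, including the regex that removes an 'n' embedded in a combined short flag:

import re
import shlex


def strip_non_interactive(flags):
    reflag = []
    for flag in shlex.split(flags):
        if flag in ('-n', '--non-interactive'):
            continue
        elif not flag.startswith('--'):
            # drop an embedded 'n' from combined short flags like -Hn
            flag = re.sub(r'^(-\w*)n(\w*.*)', r'\1\2', flag)
        reflag.append(flag)
    return shlex.join(reflag)


print(strip_non_interactive('-H -S -n'))           # -H -S
print(strip_non_interactive('-HSn'))               # -HS
print(strip_non_interactive('--preserve-env -n'))  # --preserve-env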
diff --git a/lib/ansible/plugins/cache/__init__.py b/lib/ansible/plugins/cache/__init__.py
new file mode 100644
index 0000000..3fb0d9b
--- /dev/null
+++ b/lib/ansible/plugins/cache/__init__.py
@@ -0,0 +1,375 @@
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2018, Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import errno
+import os
+import tempfile
+import time
+
+from abc import abstractmethod
+from collections.abc import MutableMapping
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins import AnsiblePlugin
+from ansible.plugins.loader import cache_loader
+from ansible.utils.collection_loader import resource_from_fqcr
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class BaseCacheModule(AnsiblePlugin):
+
+ # Backwards compat only. Just import the global display instead
+ _display = display
+
+ def __init__(self, *args, **kwargs):
+ super(BaseCacheModule, self).__init__()
+ self.set_options(var_options=args, direct=kwargs)
+
+ @abstractmethod
+ def get(self, key):
+ pass
+
+ @abstractmethod
+ def set(self, key, value):
+ pass
+
+ @abstractmethod
+ def keys(self):
+ pass
+
+ @abstractmethod
+ def contains(self, key):
+ pass
+
+ @abstractmethod
+ def delete(self, key):
+ pass
+
+ @abstractmethod
+ def flush(self):
+ pass
+
+ @abstractmethod
+ def copy(self):
+ pass
+
+
+class BaseFileCacheModule(BaseCacheModule):
+ """
+ A caching module backed by file-based storage.
+ """
+ def __init__(self, *args, **kwargs):
+
+ try:
+ super(BaseFileCacheModule, self).__init__(*args, **kwargs)
+ self._cache_dir = self._get_cache_connection(self.get_option('_uri'))
+ self._timeout = float(self.get_option('_timeout'))
+ except KeyError:
+ self._cache_dir = self._get_cache_connection(C.CACHE_PLUGIN_CONNECTION)
+ self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
+ self.plugin_name = resource_from_fqcr(self.__module__)
+ self._cache = {}
+ self.validate_cache_connection()
+
+ def _get_cache_connection(self, source):
+ if source:
+ try:
+ return os.path.expanduser(os.path.expandvars(source))
+ except TypeError:
+ pass
+
+ def validate_cache_connection(self):
+ if not self._cache_dir:
+ raise AnsibleError("error, '%s' cache plugin requires the 'fact_caching_connection' config option "
+ "to be set (to a writeable directory path)" % self.plugin_name)
+
+ if not os.path.exists(self._cache_dir):
+ try:
+ os.makedirs(self._cache_dir)
+ except (OSError, IOError) as e:
+ raise AnsibleError("error in '%s' cache plugin while trying to create cache dir %s : %s" % (self.plugin_name, self._cache_dir, to_bytes(e)))
+ else:
+ for x in (os.R_OK, os.W_OK, os.X_OK):
+ if not os.access(self._cache_dir, x):
+ raise AnsibleError("error in '%s' cache, configured path (%s) does not have necessary permissions (rwx), disabling plugin" % (
+ self.plugin_name, self._cache_dir))
+
+ def _get_cache_file_name(self, key):
+ prefix = self.get_option('_prefix')
+ if prefix:
+ cachefile = "%s/%s%s" % (self._cache_dir, prefix, key)
+ else:
+ cachefile = "%s/%s" % (self._cache_dir, key)
+ return cachefile
+
+ def get(self, key):
+ """ This checks the in memory cache first as the fact was not expired at 'gather time'
+ and it would be problematic if the key did expire after some long running tasks and
+ user gets 'undefined' error in the same play """
+
+ if key not in self._cache:
+
+ if self.has_expired(key) or key == "":
+ raise KeyError
+
+ cachefile = self._get_cache_file_name(key)
+ try:
+ value = self._load(cachefile)
+ self._cache[key] = value
+ except ValueError as e:
+ display.warning("error in '%s' cache plugin while trying to read %s : %s. "
+ "Most likely a corrupt file, so erasing and failing." % (self.plugin_name, cachefile, to_bytes(e)))
+ self.delete(key)
+ raise AnsibleError("The cache file %s was corrupt, or did not otherwise contain valid data. "
+ "It has been removed, so you can re-run your command now." % cachefile)
+ except (OSError, IOError) as e:
+ display.warning("error in '%s' cache plugin while trying to read %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
+ raise KeyError
+ except Exception as e:
+ raise AnsibleError("Error while decoding the cache file %s: %s" % (cachefile, to_bytes(e)))
+
+ return self._cache.get(key)
+
+ def set(self, key, value):
+
+ self._cache[key] = value
+
+ cachefile = self._get_cache_file_name(key)
+ tmpfile_handle, tmpfile_path = tempfile.mkstemp(dir=os.path.dirname(cachefile))
+ os.close(tmpfile_handle)  # _dump writes by path; close the mkstemp fd so it does not leak
+ try:
+ try:
+ self._dump(value, tmpfile_path)
+ except (OSError, IOError) as e:
+ display.warning("error in '%s' cache plugin while trying to write to '%s' : %s" % (self.plugin_name, tmpfile_path, to_bytes(e)))
+ try:
+ os.rename(tmpfile_path, cachefile)
+ except (OSError, IOError) as e:
+ display.warning("error in '%s' cache plugin while trying to move '%s' to '%s' : %s" % (self.plugin_name, tmpfile_path, cachefile, to_bytes(e)))
+ finally:
+ try:
+ os.unlink(tmpfile_path)
+ except OSError:
+ pass
+
+ def has_expired(self, key):
+
+ if self._timeout == 0:
+ return False
+
+ cachefile = self._get_cache_file_name(key)
+ try:
+ st = os.stat(cachefile)
+ except (OSError, IOError) as e:
+ if e.errno == errno.ENOENT:
+ return False
+ else:
+ display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
+ return False
+
+ if time.time() - st.st_mtime <= self._timeout:
+ return False
+
+ if key in self._cache:
+ del self._cache[key]
+ return True
+
+ def keys(self):
+ # When using a prefix we must remove it from the key name before
+ # checking the expiry and returning it to the caller. Keys that do not
+ # share the same prefix cannot be fetched from the cache.
+ prefix = self.get_option('_prefix')
+ prefix_length = len(prefix)
+ keys = []
+ for k in os.listdir(self._cache_dir):
+ if k.startswith('.') or not k.startswith(prefix):
+ continue
+
+ k = k[prefix_length:]
+ if not self.has_expired(k):
+ keys.append(k)
+
+ return keys
+
+ def contains(self, key):
+ cachefile = self._get_cache_file_name(key)
+
+ if key in self._cache:
+ return True
+
+ if self.has_expired(key):
+ return False
+ try:
+ os.stat(cachefile)
+ return True
+ except (OSError, IOError) as e:
+ if e.errno == errno.ENOENT:
+ return False
+ else:
+ display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
+
+ def delete(self, key):
+ try:
+ del self._cache[key]
+ except KeyError:
+ pass
+ try:
+ os.remove(self._get_cache_file_name(key))
+ except (OSError, IOError):
+ pass # TODO: only pass on non existing?
+
+ def flush(self):
+ self._cache = {}
+ for key in self.keys():
+ self.delete(key)
+
+ def copy(self):
+ ret = dict()
+ for key in self.keys():
+ ret[key] = self.get(key)
+ return ret
+
+ @abstractmethod
+ def _load(self, filepath):
+ """
+ Read data from a filepath and return it as a value
+
+ :arg filepath: The filepath to read from.
+ :returns: The value stored in the filepath
+
+ This method reads from the file on disk and takes care of any parsing
+ and transformation of the data before returning it. The value
+ returned should be what Ansible would expect if it were uncached data.
+
+ .. note:: Filehandles have advantages but calling code doesn't know
+ whether this file is text or binary, should be decoded, or accessed via
+ a library function. Therefore the API uses a filepath and opens
+ the file inside of the method.
+ """
+ pass
+
+ @abstractmethod
+ def _dump(self, value, filepath):
+ """
+ Write data to a filepath
+
+ :arg value: The value to store
+ :arg filepath: The filepath to store it at
+ """
+ pass
+
+
+class CachePluginAdjudicator(MutableMapping):
+ """
+ Intermediary between a cache dictionary and a CacheModule
+ """
+ def __init__(self, plugin_name='memory', **kwargs):
+ self._cache = {}
+ self._retrieved = {}
+
+ self._plugin = cache_loader.get(plugin_name, **kwargs)
+ if not self._plugin:
+ raise AnsibleError('Unable to load the cache plugin (%s).' % plugin_name)
+
+ self._plugin_name = plugin_name
+
+ def update_cache_if_changed(self):
+ if self._retrieved != self._cache:
+ self.set_cache()
+
+ def set_cache(self):
+ for top_level_cache_key in self._cache.keys():
+ self._plugin.set(top_level_cache_key, self._cache[top_level_cache_key])
+ self._retrieved = copy.deepcopy(self._cache)
+
+ def load_whole_cache(self):
+ for key in self._plugin.keys():
+ self._cache[key] = self._plugin.get(key)
+
+ def __repr__(self):
+ return to_text(self._cache)
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def __len__(self):
+ return len(self.keys())
+
+ def _do_load_key(self, key):
+ load = False
+ if all([
+ key not in self._cache,
+ key not in self._retrieved,
+ self._plugin_name != 'memory',
+ self._plugin.contains(key),
+ ]):
+ load = True
+ return load
+
+ def __getitem__(self, key):
+ if self._do_load_key(key):
+ try:
+ self._cache[key] = self._plugin.get(key)
+ except KeyError:
+ pass
+ else:
+ self._retrieved[key] = self._cache[key]
+ return self._cache[key]
+
+ def get(self, key, default=None):
+ if self._do_load_key(key):
+ try:
+ self._cache[key] = self._plugin.get(key)
+ except KeyError:
+ pass
+ else:
+ self._retrieved[key] = self._cache[key]
+ return self._cache.get(key, default)
+
+ def items(self):
+ return self._cache.items()
+
+ def values(self):
+ return self._cache.values()
+
+ def keys(self):
+ return self._cache.keys()
+
+ def pop(self, key, *args):
+ if args:
+ return self._cache.pop(key, args[0])
+ return self._cache.pop(key)
+
+ def __delitem__(self, key):
+ del self._cache[key]
+
+ def __setitem__(self, key, value):
+ self._cache[key] = value
+
+ def flush(self):
+ self._plugin.flush()
+ self._cache = {}
+
+ def update(self, value):
+ self._cache.update(value)
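BaseFileCacheModule.set() uses the classic atomic-replace pattern: serialize into a temporary file in the same directory, then rename it over the destination, so a concurrent reader sees either the old entry or the new one, never a torn write. A minimal standalone sketch with plain json in place of the plugin's _dump:

import json
import os
import tempfile


def atomic_json_write(value, cachefile):
    # the temp file must live in the same directory so the rename stays on one filesystem
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(cachefile))
    try:
        with os.fdopen(fd, 'w') as f:
            json.dump(value, f)
        os.rename(tmp_path, cachefile)  # atomic replacement on POSIX
    finally:
        try:
            os.unlink(tmp_path)  # no-op once the rename has consumed the temp file
        except OSError:
            pass


target = os.path.join(tempfile.mkdtemp(), 'host1')
atomic_json_write({'ansible_distribution': 'Fedora'}, target)
print(open(target).read())  # {"ansible_distribution": "Fedora"}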
diff --git a/lib/ansible/plugins/cache/base.py b/lib/ansible/plugins/cache/base.py
new file mode 100644
index 0000000..692b1b3
--- /dev/null
+++ b/lib/ansible/plugins/cache/base.py
@@ -0,0 +1,21 @@
+# (c) 2017, ansible by Red Hat
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# moved actual classes to __init__ kept here for backward compat with 3rd parties
+from ansible.plugins.cache import BaseCacheModule, BaseFileCacheModule
diff --git a/lib/ansible/plugins/cache/jsonfile.py b/lib/ansible/plugins/cache/jsonfile.py
new file mode 100644
index 0000000..a26828a
--- /dev/null
+++ b/lib/ansible/plugins/cache/jsonfile.py
@@ -0,0 +1,64 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: jsonfile
+ short_description: JSON formatted files.
+ description:
+ - This cache uses JSON formatted, per host, files saved to the filesystem.
+ version_added: "1.9"
+ author: Ansible Core (@ansible-core)
+ options:
+ _uri:
+ required: True
+ description:
+ - Path in which the cache plugin will save the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ type: path
+ _prefix:
+ description: User defined prefix to use when creating the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout for the cache plugin data
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
+
+import codecs
+import json
+
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by json files.
+ """
+
+ def _load(self, filepath):
+ # Valid JSON is always UTF-8 encoded.
+ with codecs.open(filepath, 'r', encoding='utf-8') as f:
+ return json.load(f, cls=AnsibleJSONDecoder)
+
+ def _dump(self, value, filepath):
+ with codecs.open(filepath, 'w', encoding='utf-8') as f:
+ f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
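The _load/_dump pair is the entire per-format contract a file cache has to satisfy. A quick round-trip sketch using the stdlib encoder in place of AnsibleJSONEncoder/AnsibleJSONDecoder:

import codecs
import json
import os
import tempfile

facts = {'ansible_hostname': 'web01', 'ansible_processor_vcpus': 4}

fd, path = tempfile.mkstemp(suffix='.json')
os.close(fd)

# _dump equivalent: valid JSON is always UTF-8 encoded
with codecs.open(path, 'w', encoding='utf-8') as f:
    f.write(json.dumps(facts, sort_keys=True, indent=4))

# _load equivalent
with codecs.open(path, 'r', encoding='utf-8') as f:
    assert json.load(f) == facts
print('round trip ok')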
diff --git a/lib/ansible/plugins/cache/memory.py b/lib/ansible/plugins/cache/memory.py
new file mode 100644
index 0000000..59f97b6
--- /dev/null
+++ b/lib/ansible/plugins/cache/memory.py
@@ -0,0 +1,53 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: memory
+ short_description: RAM backed, non-persistent
+ description:
+ - RAM backed cache that is not persistent.
+ - This is the default used if no other plugin is specified.
+ - There are no options to configure.
+ version_added: historical
+ author: core team (@ansible-core)
+'''
+
+from ansible.plugins.cache import BaseCacheModule
+
+
+class CacheModule(BaseCacheModule):
+
+ def __init__(self, *args, **kwargs):
+ self._cache = {}
+
+ def get(self, key):
+ return self._cache.get(key)
+
+ def set(self, key, value):
+ self._cache[key] = value
+
+ def keys(self):
+ return self._cache.keys()
+
+ def contains(self, key):
+ return key in self._cache
+
+ def delete(self, key):
+ del self._cache[key]
+
+ def flush(self):
+ self._cache = {}
+
+ def copy(self):
+ return self._cache.copy()
+
+ def __getstate__(self):
+ return self.copy()
+
+ def __setstate__(self, data):
+ self._cache = data
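The __getstate__/__setstate__ pair exists because a cache instance may be pickled, for example when state is shipped between worker processes; only the plain dict of cached data needs to survive. A toy demonstration of the same idea:

import pickle


class MemoryCache:
    def __init__(self):
        self._cache = {}

    def set(self, key, value):
        self._cache[key] = value

    def get(self, key):
        return self._cache.get(key)

    def __getstate__(self):
        return self._cache.copy()  # pickle only the data

    def __setstate__(self, data):
        self._cache = data  # skip __init__ on unpickle


c = MemoryCache()
c.set('host1', {'ping': 'pong'})
clone = pickle.loads(pickle.dumps(c))
print(clone.get('host1'))  # {'ping': 'pong'}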
diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py
new file mode 100644
index 0000000..d4fc347
--- /dev/null
+++ b/lib/ansible/plugins/callback/__init__.py
@@ -0,0 +1,610 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import difflib
+import json
+import re
+import sys
+import textwrap
+
+from collections import OrderedDict
+from collections.abc import MutableMapping
+from copy import deepcopy
+
+from ansible import constants as C
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.six import text_type
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.parsing.yaml.objects import AnsibleUnicode
+from ansible.plugins import AnsiblePlugin
+from ansible.utils.color import stringc
+from ansible.utils.display import Display
+from ansible.utils.unsafe_proxy import AnsibleUnsafeText, NativeJinjaUnsafeText
+from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
+
+import yaml
+
+global_display = Display()
+
+
+__all__ = ["CallbackBase"]
+
+
+_DEBUG_ALLOWED_KEYS = frozenset(('msg', 'exception', 'warnings', 'deprecations'))
+_YAML_TEXT_TYPES = (text_type, AnsibleUnicode, AnsibleUnsafeText, NativeJinjaUnsafeText)
+# Characters that libyaml/pyyaml consider breaks
+_YAML_BREAK_CHARS = '\n\x85\u2028\u2029' # NL, NEL, LS, PS
+# regex representation of libyaml/pyyaml of a space followed by a break character
+_SPACE_BREAK_RE = re.compile(fr' +([{_YAML_BREAK_CHARS}])')
+
+
+class _AnsibleCallbackDumper(AnsibleDumper):
+ def __init__(self, lossy=False):
+ self._lossy = lossy
+
+ def __call__(self, *args, **kwargs):
+ # pyyaml expects to be handed a class it can instantiate, but to
+ # smuggle the ``lossy`` configuration we accept it in ``__init__`` and
+ # define this ``__call__`` to mimic the instantiation pyyaml performs
+ super().__init__(*args, **kwargs)
+ return self
+
+
+def _should_use_block(scalar):
+ """Returns true if string should be in block format based on the existence of various newline separators"""
+ # This method of searching is faster than using a regex
+ for ch in _YAML_BREAK_CHARS:
+ if ch in scalar:
+ return True
+ return False
+
+
+class _SpecialCharacterTranslator:
+ def __getitem__(self, ch):
+ # "special character" logic from pyyaml yaml.emitter.Emitter.analyze_scalar, translated to decimal
+ # for perf w/ str.translate
+ if (ch == 10 or
+ 32 <= ch <= 126 or
+ ch == 133 or
+ 160 <= ch <= 55295 or
+ 57344 <= ch <= 65533 or
+ 65536 <= ch < 1114111)\
+ and ch != 65279:
+ return ch
+ return None
+
+
+def _filter_yaml_special(scalar):
+ """Filter a string removing any character that libyaml/pyyaml declare as special"""
+ return scalar.translate(_SpecialCharacterTranslator())
+
+
+def _munge_data_for_lossy_yaml(scalar):
+ """Modify a string so that analyze_scalar in libyaml/pyyaml will allow block formatting"""
+ # we care more about readability than accuracy, so...
+ # ...libyaml/pyyaml does not permit trailing spaces for block scalars
+ scalar = scalar.rstrip()
+ # ...libyaml/pyyaml does not permit tabs for block scalars
+ scalar = scalar.expandtabs()
+ # ...libyaml/pyyaml only permits special characters for double quoted scalars
+ scalar = _filter_yaml_special(scalar)
+ # ...libyaml/pyyaml only permits spaces followed by breaks for double quoted scalars
+ return _SPACE_BREAK_RE.sub(r'\1', scalar)
+
+
+def _pretty_represent_str(self, data):
+ """Uses block style for multi-line strings"""
+ data = text_type(data)
+ if _should_use_block(data):
+ style = '|'
+ if self._lossy:
+ data = _munge_data_for_lossy_yaml(data)
+ else:
+ style = self.default_style
+
+ node = yaml.representer.ScalarNode('tag:yaml.org,2002:str', data, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+
+for data_type in _YAML_TEXT_TYPES:
+ _AnsibleCallbackDumper.add_representer(
+ data_type,
+ _pretty_represent_str
+ )
+
+
+class CallbackBase(AnsiblePlugin):
+
+ '''
+ This is a base ansible callback class that does nothing. New callbacks should
+ use this class as a base and override any callback methods for which they
+ wish to execute custom actions.
+ '''
+
+ def __init__(self, display=None, options=None):
+ if display:
+ self._display = display
+ else:
+ self._display = global_display
+
+ if self._display.verbosity >= 4:
+ name = getattr(self, 'CALLBACK_NAME', 'unnamed')
+ ctype = getattr(self, 'CALLBACK_TYPE', 'old')
+ version = getattr(self, 'CALLBACK_VERSION', '1.0')
+ self._display.vvvv('Loading callback plugin %s of type %s, v%s from %s' % (name, ctype, version, sys.modules[self.__module__].__file__))
+
+ self.disabled = False
+ self.wants_implicit_tasks = False
+
+ self._plugin_options = {}
+ if options is not None:
+ self.set_options(options)
+
+ self._hide_in_debug = ('changed', 'failed', 'skipped', 'invocation', 'skip_reason')
+
+ ''' helper for callbacks, so they don't all have to include deepcopy '''
+ _copy_result = deepcopy
+
+ def set_option(self, k, v):
+ self._plugin_options[k] = v
+
+ def get_option(self, k):
+ return self._plugin_options[k]
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ ''' This is different from the normal plugin method, as callbacks get called early and don't really accept keywords.
+ Also, _options was already taken for CLI args, so callbacks use _plugin_options instead.
+ '''
+
+ # load from config
+ self._plugin_options = C.config.get_plugin_options(self.plugin_type, self._load_name, keys=task_keys, variables=var_options, direct=direct)
+
+ @staticmethod
+ def host_label(result):
+ """Return label for the hostname (& delegated hostname) of a task
+ result.
+ """
+ label = "%s" % result._host.get_name()
+ if result._task.delegate_to and result._task.delegate_to != result._host.get_name():
+ # show delegated host
+ label += " -> %s" % result._task.delegate_to
+ # in case we have 'extra resolution'
+ ahost = result._result.get('_ansible_delegated_vars', {}).get('ansible_host', result._task.delegate_to)
+ if result._task.delegate_to != ahost:
+ label += "(%s)" % ahost
+ return label
+
+ def _run_is_verbose(self, result, verbosity=0):
+ return ((self._display.verbosity > verbosity or result._result.get('_ansible_verbose_always', False) is True)
+ and result._result.get('_ansible_verbose_override', False) is False)
+
+ def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False, serialize=True):
+ try:
+ result_format = self.get_option('result_format')
+ except KeyError:
+ # Callback does not declare result_format nor extend result_format_callback
+ result_format = 'json'
+
+ try:
+ pretty_results = self.get_option('pretty_results')
+ except KeyError:
+ # Callback does not declare pretty_results nor extend result_format_callback
+ pretty_results = None
+
+ indent_conditions = (
+ result.get('_ansible_verbose_always'),
+ pretty_results is None and result_format != 'json',
+ pretty_results is True,
+ self._display.verbosity > 2,
+ )
+
+ if not indent and any(indent_conditions):
+ indent = 4
+ if pretty_results is False:
+ # pretty_results=False overrides any specified indentation
+ indent = None
+
+ # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
+ abridged_result = strip_internal_keys(module_response_deepcopy(result))
+
+ # remove invocation unless specifically wanting it
+ if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
+ del abridged_result['invocation']
+
+ # remove diff information from screen output
+ if self._display.verbosity < 3 and 'diff' in result:
+ del abridged_result['diff']
+
+ # remove exception from screen output
+ if 'exception' in abridged_result:
+ del abridged_result['exception']
+
+ if not serialize:
+ # Just return ``abridged_result`` without going through serialization
+ # to permit callbacks to take advantage of ``_dump_results``
+ # that want to further modify the result, or use custom serialization
+ return abridged_result
+
+ if result_format == 'json':
+ try:
+ return json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
+ except TypeError:
+ # Python3 bug: throws an exception when keys are non-homogeneous types:
+ # https://bugs.python.org/issue25457
+ # sort into an OrderedDict and then json.dumps() that instead
+ if not OrderedDict:
+ raise
+ return json.dumps(OrderedDict(sorted(abridged_result.items(), key=to_text)),
+ cls=AnsibleJSONEncoder, indent=indent,
+ ensure_ascii=False, sort_keys=False)
+ elif result_format == 'yaml':
+ # None is a sentinel in this case that indicates default behavior
+ # default behavior for yaml is to prettify results
+ lossy = pretty_results in (None, True)
+ if lossy:
+ # if we already have stdout, we don't need stdout_lines
+ if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
+ abridged_result['stdout_lines'] = '<omitted>'
+
+ # if we already have stderr, we don't need stderr_lines
+ if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
+ abridged_result['stderr_lines'] = '<omitted>'
+
+ return '\n%s' % textwrap.indent(
+ yaml.dump(
+ abridged_result,
+ allow_unicode=True,
+ Dumper=_AnsibleCallbackDumper(lossy=lossy),
+ default_flow_style=False,
+ indent=indent,
+ # sort_keys=sort_keys # This requires PyYAML>=5.1
+ ),
+ ' ' * (indent or 4)
+ )
+
+ def _handle_warnings(self, res):
+ ''' display warnings, if enabled and any exist in the result '''
+ if C.ACTION_WARNINGS:
+ if 'warnings' in res and res['warnings']:
+ for warning in res['warnings']:
+ self._display.warning(warning)
+ del res['warnings']
+ if 'deprecations' in res and res['deprecations']:
+ for warning in res['deprecations']:
+ self._display.deprecated(**warning)
+ del res['deprecations']
+
+ def _handle_exception(self, result, use_stderr=False):
+
+ if 'exception' in result:
+ msg = "An exception occurred during task execution. "
+ exception_str = to_text(result['exception'])
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = exception_str.strip().split('\n')[-1]
+ msg += "To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "The full traceback is:\n" + exception_str
+ del result['exception']
+
+ self._display.display(msg, color=C.COLOR_ERROR, stderr=use_stderr)
+
+ def _serialize_diff(self, diff):
+ try:
+ result_format = self.get_option('result_format')
+ except KeyError:
+ # Callback does not declare result_format nor extend result_format_callback
+ result_format = 'json'
+
+ try:
+ pretty_results = self.get_option('pretty_results')
+ except KeyError:
+ # Callback does not declare pretty_results nor extend result_format_callback
+ pretty_results = None
+
+ if result_format == 'json':
+ return json.dumps(diff, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n'
+ elif result_format == 'yaml':
+ # None is a sentinel in this case that indicates default behavior
+ # default behavior for yaml is to prettify results
+ lossy = pretty_results in (None, True)
+ return '%s\n' % textwrap.indent(
+ yaml.dump(
+ diff,
+ allow_unicode=True,
+ Dumper=_AnsibleCallbackDumper(lossy=lossy),
+ default_flow_style=False,
+ indent=4,
+ # sort_keys=sort_keys # This requires PyYAML>=5.1
+ ),
+ ' '
+ )
+
+ def _get_diff(self, difflist):
+
+ if not isinstance(difflist, list):
+ difflist = [difflist]
+
+ ret = []
+ for diff in difflist:
+ if 'dst_binary' in diff:
+ ret.append(u"diff skipped: destination file appears to be binary\n")
+ if 'src_binary' in diff:
+ ret.append(u"diff skipped: source file appears to be binary\n")
+ if 'dst_larger' in diff:
+ ret.append(u"diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
+ if 'src_larger' in diff:
+ ret.append(u"diff skipped: source file size is greater than %d\n" % diff['src_larger'])
+ if 'before' in diff and 'after' in diff:
+ # format complex structures into 'files'
+ for x in ['before', 'after']:
+ if isinstance(diff[x], MutableMapping):
+ diff[x] = self._serialize_diff(diff[x])
+ elif diff[x] is None:
+ diff[x] = ''
+ if 'before_header' in diff:
+ before_header = u"before: %s" % diff['before_header']
+ else:
+ before_header = u'before'
+ if 'after_header' in diff:
+ after_header = u"after: %s" % diff['after_header']
+ else:
+ after_header = u'after'
+ before_lines = diff['before'].splitlines(True)
+ after_lines = diff['after'].splitlines(True)
+ if before_lines and not before_lines[-1].endswith(u'\n'):
+ before_lines[-1] += u'\n\\ No newline at end of file\n'
+ if after_lines and not after_lines[-1].endswith('\n'):
+ after_lines[-1] += u'\n\\ No newline at end of file\n'
+ differ = difflib.unified_diff(before_lines,
+ after_lines,
+ fromfile=before_header,
+ tofile=after_header,
+ fromfiledate=u'',
+ tofiledate=u'',
+ n=C.DIFF_CONTEXT)
+ difflines = list(differ)
+ has_diff = False
+ for line in difflines:
+ has_diff = True
+ if line.startswith(u'+'):
+ line = stringc(line, C.COLOR_DIFF_ADD)
+ elif line.startswith(u'-'):
+ line = stringc(line, C.COLOR_DIFF_REMOVE)
+ elif line.startswith(u'@@'):
+ line = stringc(line, C.COLOR_DIFF_LINES)
+ ret.append(line)
+ if has_diff:
+ ret.append('\n')
+ if 'prepared' in diff:
+ ret.append(diff['prepared'])
+ return u''.join(ret)
+
+ def _get_item_label(self, result):
+ ''' retrieves the value to be displayed as a label for an item entry from a result object'''
+ if result.get('_ansible_no_log', False):
+ item = "(censored due to no_log)"
+ else:
+ item = result.get('_ansible_item_label', result.get('item'))
+ return item
+
+ def _process_items(self, result):
+ # just remove them as now they get handled by individual callbacks
+ del result._result['results']
+
+ def _clean_results(self, result, task_name):
+ ''' removes data from results for display '''
+
+ # mostly controls that debug only outputs what it was meant to
+ if task_name in C._ACTION_DEBUG:
+ if 'msg' in result:
+ # msg should be alone
+ for key in list(result.keys()):
+ if key not in _DEBUG_ALLOWED_KEYS and not key.startswith('_'):
+ result.pop(key)
+ else:
+ # 'var' value as field, so eliminate others and what is left should be varname
+ for hidme in self._hide_in_debug:
+ result.pop(hidme, None)
+
+ def _print_task_path(self, task, color=C.COLOR_DEBUG):
+ path = task.get_path()
+ if path:
+ self._display.display(u"task path: %s" % path, color=color)
+
+ def set_play_context(self, play_context):
+ pass
+
+ def on_any(self, *args, **kwargs):
+ pass
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ pass
+
+ def runner_on_ok(self, host, res):
+ pass
+
+ def runner_on_skipped(self, host, item=None):
+ pass
+
+ def runner_on_unreachable(self, host, res):
+ pass
+
+ def runner_on_no_hosts(self):
+ pass
+
+ def runner_on_async_poll(self, host, res, jid, clock):
+ pass
+
+ def runner_on_async_ok(self, host, res, jid):
+ pass
+
+ def runner_on_async_failed(self, host, res, jid):
+ pass
+
+ def playbook_on_start(self):
+ pass
+
+ def playbook_on_notify(self, host, handler):
+ pass
+
+ def playbook_on_no_hosts_matched(self):
+ pass
+
+ def playbook_on_no_hosts_remaining(self):
+ pass
+
+ def playbook_on_task_start(self, name, is_conditional):
+ pass
+
+ def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
+ pass
+
+ def playbook_on_setup(self):
+ pass
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ pass
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ pass
+
+ def playbook_on_play_start(self, name):
+ pass
+
+ def playbook_on_stats(self, stats):
+ pass
+
+ def on_file_diff(self, host, diff):
+ pass
+
+ # V2 METHODS, by default they call v1 counterparts if possible
+ def v2_on_any(self, *args, **kwargs):
+ self.on_any(args, kwargs)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ host = result._host.get_name()
+ self.runner_on_failed(host, result._result, ignore_errors)
+
+ def v2_runner_on_ok(self, result):
+ host = result._host.get_name()
+ self.runner_on_ok(host, result._result)
+
+ def v2_runner_on_skipped(self, result):
+ if C.DISPLAY_SKIPPED_HOSTS:
+ host = result._host.get_name()
+ self.runner_on_skipped(host, self._get_item_label(getattr(result._result, 'results', {})))
+
+ def v2_runner_on_unreachable(self, result):
+ host = result._host.get_name()
+ self.runner_on_unreachable(host, result._result)
+
+ def v2_runner_on_async_poll(self, result):
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ # FIXME, get real clock
+ clock = 0
+ self.runner_on_async_poll(host, result._result, jid, clock)
+
+ def v2_runner_on_async_ok(self, result):
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ self.runner_on_async_ok(host, result._result, jid)
+
+ def v2_runner_on_async_failed(self, result):
+ host = result._host.get_name()
+ # Attempt to get the async job ID. If the job does not finish before the
+ # async timeout value, the ID may be within the unparsed 'async_result' dict.
+ jid = result._result.get('ansible_job_id')
+ if not jid and 'async_result' in result._result:
+ jid = result._result['async_result'].get('ansible_job_id')
+ self.runner_on_async_failed(host, result._result, jid)
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook_on_start()
+
+ def v2_playbook_on_notify(self, handler, host):
+ self.playbook_on_notify(host, handler)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self.playbook_on_no_hosts_matched()
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self.playbook_on_no_hosts_remaining()
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.playbook_on_task_start(task.name, is_conditional)
+
+ # FIXME: not called
+ def v2_playbook_on_cleanup_task_start(self, task):
+ pass # no v1 correspondence
+
+ def v2_playbook_on_handler_task_start(self, task):
+ pass # no v1 correspondence
+
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
+ self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
+
+ # FIXME: not called
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ host = result._host.get_name()
+ self.playbook_on_import_for_host(host, imported_file)
+
+ # FIXME: not called
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ host = result._host.get_name()
+ self.playbook_on_not_import_for_host(host, missing_file)
+
+ def v2_playbook_on_play_start(self, play):
+ self.playbook_on_play_start(play.name)
+
+ def v2_playbook_on_stats(self, stats):
+ self.playbook_on_stats(stats)
+
+ def v2_on_file_diff(self, result):
+ if 'diff' in result._result:
+ host = result._host.get_name()
+ self.on_file_diff(host, result._result['diff'])
+
+ def v2_playbook_on_include(self, included_file):
+ pass # no v1 correspondence
+
+ def v2_runner_item_on_ok(self, result):
+ pass
+
+ def v2_runner_item_on_failed(self, result):
+ pass
+
+ def v2_runner_item_on_skipped(self, result):
+ pass
+
+ def v2_runner_retry(self, result):
+ pass
+
+ def v2_runner_on_start(self, host, task):
+ """Event used when host begins execution of a task
+
+ .. versionadded:: 2.8
+ """
+ pass
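Much of the machinery at the top of this file exists so multi-line strings in results can render as readable YAML block scalars: libyaml refuses block style for trailing spaces, tabs, 'special' characters, and a space directly before a line break, so the lossy path strips them. A standalone sketch of that decision and munging:

import re

YAML_BREAK_CHARS = '\n\x85\u2028\u2029'  # NL, NEL, LS, PS
SPACE_BREAK_RE = re.compile(r' +([%s])' % YAML_BREAK_CHARS)


def should_use_block(scalar):
    # block style only helps when the string actually contains a break
    return any(ch in scalar for ch in YAML_BREAK_CHARS)


def munge_for_block(scalar):
    scalar = scalar.rstrip()      # no trailing spaces in block scalars
    scalar = scalar.expandtabs()  # no tabs either
    return SPACE_BREAK_RE.sub(r'\1', scalar)  # no space directly before a break


text = 'line one \nline\ttwo\n'
print(should_use_block(text))       # True
print(repr(munge_for_block(text)))  # 'line one\nline    two'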
diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py
new file mode 100644
index 0000000..54ef452
--- /dev/null
+++ b/lib/ansible/plugins/callback/default.py
@@ -0,0 +1,409 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: default
+ type: stdout
+ short_description: default Ansible screen output
+ version_added: historical
+ description:
+ - This is the default output callback for ansible-playbook.
+ extends_documentation_fragment:
+ - default_callback
+ - result_format_callback
+ requirements:
+ - set as stdout in configuration
+'''
+
+
+from ansible import constants as C
+from ansible import context
+from ansible.playbook.task_include import TaskInclude
+from ansible.plugins.callback import CallbackBase
+from ansible.utils.color import colorize, hostcolor
+from ansible.utils.fqcn import add_internal_fqcns
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'default'
+
+ def __init__(self):
+
+ self._play = None
+ self._last_task_banner = None
+ self._last_task_name = None
+ self._task_type_cache = {}
+ super(CallbackModule, self).__init__()
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+
+ host_label = self.host_label(result)
+ self._clean_results(result._result, result._task.action)
+
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr'))
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+
+ else:
+ if self._display.verbosity < 2 and self.get_option('show_task_path_on_failure'):
+ self._print_task_path(result._task)
+ msg = "fatal: [%s]: FAILED! => %s" % (host_label, self._dump_results(result._result))
+ self._display.display(msg, color=C.COLOR_ERROR, stderr=self.get_option('display_failed_stderr'))
+
+ if ignore_errors:
+ self._display.display("...ignoring", color=C.COLOR_SKIP)
+
+ def v2_runner_on_ok(self, result):
+
+ host_label = self.host_label(result)
+
+ if isinstance(result._task, TaskInclude):
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+ return
+ elif result._result.get('changed', False):
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ msg = "changed: [%s]" % (host_label,)
+ color = C.COLOR_CHANGED
+ else:
+ if not self.get_option('display_ok_hosts'):
+ return
+
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ msg = "ok: [%s]" % (host_label,)
+ color = C.COLOR_OK
+
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ self._clean_results(result._result, result._task.action)
+
+ if self._run_is_verbose(result):
+ msg += " => %s" % (self._dump_results(result._result),)
+ self._display.display(msg, color=color)
+
+ def v2_runner_on_skipped(self, result):
+
+ if self.get_option('display_skipped_hosts'):
+
+ self._clean_results(result._result, result._task.action)
+
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ if result._task.loop is not None and 'results' in result._result:
+ self._process_items(result)
+
+ msg = "skipping: [%s]" % result._host.get_name()
+ if self._run_is_verbose(result):
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_SKIP)
+
+ def v2_runner_on_unreachable(self, result):
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ host_label = self.host_label(result)
+ msg = "fatal: [%s]: UNREACHABLE! => %s" % (host_label, self._dump_results(result._result))
+ self._display.display(msg, color=C.COLOR_UNREACHABLE, stderr=self.get_option('display_failed_stderr'))
+
+ if result._task.ignore_unreachable:
+ self._display.display("...ignoring", color=C.COLOR_SKIP)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._display.banner("NO MORE HOSTS LEFT")
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._task_start(task, prefix='TASK')
+
+ def _task_start(self, task, prefix=None):
+ # Cache output prefix for task if provided
+ # This is needed to properly display 'RUNNING HANDLER' and similar
+ # when hiding skipped/ok task results
+ if prefix is not None:
+ self._task_type_cache[task._uuid] = prefix
+
+ # Preserve task name, as all vars may not be available for templating
+ # when we need it later
+ if self._play.strategy in add_internal_fqcns(('free', 'host_pinned')):
+ # Explicitly set to None for strategy free/host_pinned to account for any cached
+ # task title from a previous non-free play
+ self._last_task_name = None
+ else:
+ self._last_task_name = task.get_name().strip()
+
+ # Display the task banner immediately if we're not doing any filtering based on task result
+ if self.get_option('display_skipped_hosts') and self.get_option('display_ok_hosts'):
+ self._print_task_banner(task)
+
+ def _print_task_banner(self, task):
+        # args can be specified as no_log in several places: in the task or in
+        # the argument spec. We can check whether the task is no_log, but the
+        # argument spec cannot be checked here because it is only evaluated on
+        # the target machine, and we have not run it there yet.
+ #
+ # So we give people a config option to affect display of the args so
+ # that they can secure this if they feel that their stdout is insecure
+ # (shoulder surfing, logging stdout straight to a file, etc).
+ args = ''
+ if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
+ args = u', '.join(u'%s=%s' % a for a in task.args.items())
+ args = u' %s' % args
+
+ prefix = self._task_type_cache.get(task._uuid, 'TASK')
+
+ # Use cached task name
+ task_name = self._last_task_name
+ if task_name is None:
+ task_name = task.get_name().strip()
+
+ if task.check_mode and self.get_option('check_mode_markers'):
+ checkmsg = " [CHECK MODE]"
+ else:
+ checkmsg = ""
+ self._display.banner(u"%s [%s%s]%s" % (prefix, task_name, args, checkmsg))
+
+ if self._display.verbosity >= 2:
+ self._print_task_path(task)
+
+ self._last_task_banner = task._uuid
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ self._task_start(task, prefix='CLEANUP TASK')
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._task_start(task, prefix='RUNNING HANDLER')
+
+ def v2_runner_on_start(self, host, task):
+ if self.get_option('show_per_host_start'):
+ self._display.display(" [started %s on %s]" % (task, host), color=C.COLOR_OK)
+
+ def v2_playbook_on_play_start(self, play):
+ name = play.get_name().strip()
+ if play.check_mode and self.get_option('check_mode_markers'):
+ checkmsg = " [CHECK MODE]"
+ else:
+ checkmsg = ""
+ if not name:
+ msg = u"PLAY%s" % checkmsg
+ else:
+ msg = u"PLAY [%s]%s" % (name, checkmsg)
+
+ self._play = play
+
+ self._display.banner(msg)
+
+ def v2_on_file_diff(self, result):
+ if result._task.loop and 'results' in result._result:
+ for res in result._result['results']:
+ if 'diff' in res and res['diff'] and res.get('changed', False):
+ diff = self._get_diff(res['diff'])
+ if diff:
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+ self._display.display(diff)
+ elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
+ diff = self._get_diff(result._result['diff'])
+ if diff:
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+ self._display.display(diff)
+
+ def v2_runner_item_on_ok(self, result):
+
+ host_label = self.host_label(result)
+ if isinstance(result._task, TaskInclude):
+ return
+ elif result._result.get('changed', False):
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ msg = 'changed'
+ color = C.COLOR_CHANGED
+ else:
+ if not self.get_option('display_ok_hosts'):
+ return
+
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ msg = 'ok'
+ color = C.COLOR_OK
+
+ msg = "%s: [%s] => (item=%s)" % (msg, host_label, self._get_item_label(result._result))
+ self._clean_results(result._result, result._task.action)
+ if self._run_is_verbose(result):
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=color)
+
+ def v2_runner_item_on_failed(self, result):
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ host_label = self.host_label(result)
+ self._clean_results(result._result, result._task.action)
+ self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr'))
+
+ msg = "failed: [%s]" % (host_label,)
+ self._handle_warnings(result._result)
+ self._display.display(
+ msg + " (item=%s) => %s" % (self._get_item_label(result._result), self._dump_results(result._result)),
+ color=C.COLOR_ERROR,
+ stderr=self.get_option('display_failed_stderr')
+ )
+
+ def v2_runner_item_on_skipped(self, result):
+ if self.get_option('display_skipped_hosts'):
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ self._clean_results(result._result, result._task.action)
+ msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item_label(result._result))
+ if self._run_is_verbose(result):
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_SKIP)
+
+ def v2_playbook_on_include(self, included_file):
+ msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
+ label = self._get_item_label(included_file._vars)
+ if label:
+ msg += " => (item=%s)" % label
+ self._display.display(msg, color=C.COLOR_SKIP)
+
+ def v2_playbook_on_stats(self, stats):
+ self._display.banner("PLAY RECAP")
+
+ hosts = sorted(stats.processed.keys())
+ for h in hosts:
+ t = stats.summarize(h)
+
+ self._display.display(
+ u"%s : %s %s %s %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize(u'ok', t['ok'], C.COLOR_OK),
+ colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', t['failures'], C.COLOR_ERROR),
+ colorize(u'skipped', t['skipped'], C.COLOR_SKIP),
+ colorize(u'rescued', t['rescued'], C.COLOR_OK),
+ colorize(u'ignored', t['ignored'], C.COLOR_WARN),
+ ),
+ screen_only=True
+ )
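+            # Illustrative screen line for one host (counts assumed; colors omitted):
+            #   web1 : ok=3 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0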
+
+ self._display.display(
+ u"%s : %s %s %s %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize(u'ok', t['ok'], None),
+ colorize(u'changed', t['changed'], None),
+ colorize(u'unreachable', t['unreachable'], None),
+ colorize(u'failed', t['failures'], None),
+ colorize(u'skipped', t['skipped'], None),
+ colorize(u'rescued', t['rescued'], None),
+ colorize(u'ignored', t['ignored'], None),
+ ),
+ log_only=True
+ )
+
+ self._display.display("", screen_only=True)
+
+ # print custom stats if required
+ if stats.custom and self.get_option('show_custom_stats'):
+ self._display.banner("CUSTOM STATS: ")
+ # per host
+ # TODO: come up with 'pretty format'
+ for k in sorted(stats.custom.keys()):
+ if k == '_run':
+ continue
+ self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+
+ # print per run custom stats
+ if '_run' in stats.custom:
+ self._display.display("", screen_only=True)
+ self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ self._display.display("", screen_only=True)
+
+ if context.CLIARGS['check'] and self.get_option('check_mode_markers'):
+ self._display.banner("DRY RUN")
+
+ def v2_playbook_on_start(self, playbook):
+ if self._display.verbosity > 1:
+ from os.path import basename
+ self._display.banner("PLAYBOOK: %s" % basename(playbook._file_name))
+
+ # show CLI arguments
+ if self._display.verbosity > 3:
+ if context.CLIARGS.get('args'):
+ self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']),
+ color=C.COLOR_VERBOSE, screen_only=True)
+
+ for argument in (a for a in context.CLIARGS if a != 'args'):
+ val = context.CLIARGS[argument]
+ if val:
+ self._display.display('%s: %s' % (argument, val), color=C.COLOR_VERBOSE, screen_only=True)
+
+ if context.CLIARGS['check'] and self.get_option('check_mode_markers'):
+ self._display.banner("DRY RUN")
+
+ def v2_runner_retry(self, result):
+ task_name = result.task_name or result._task
+ host_label = self.host_label(result)
+ msg = "FAILED - RETRYING: [%s]: %s (%d retries left)." % (host_label, task_name, result._result['retries'] - result._result['attempts'])
+ if self._run_is_verbose(result, verbosity=2):
+            msg += " Result was: %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_DEBUG)
+
+ def v2_runner_on_async_poll(self, result):
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ started = result._result.get('started')
+ finished = result._result.get('finished')
+ self._display.display(
+ 'ASYNC POLL on %s: jid=%s started=%s finished=%s' % (host, jid, started, finished),
+ color=C.COLOR_DEBUG
+ )
+
+ def v2_runner_on_async_ok(self, result):
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ self._display.display("ASYNC OK on %s: jid=%s" % (host, jid), color=C.COLOR_DEBUG)
+
+ def v2_runner_on_async_failed(self, result):
+ host = result._host.get_name()
+
+ # Attempt to get the async job ID. If the job does not finish before the
+ # async timeout value, the ID may be within the unparsed 'async_result' dict.
+ jid = result._result.get('ansible_job_id')
+ if not jid and 'async_result' in result._result:
+ jid = result._result['async_result'].get('ansible_job_id')
+ self._display.display("ASYNC FAILED on %s: jid=%s" % (host, jid), color=C.COLOR_DEBUG)
+
+ def v2_playbook_on_notify(self, handler, host):
+ if self._display.verbosity > 1:
+ self._display.display("NOTIFIED HANDLER %s for %s" % (handler.get_name(), host), color=C.COLOR_VERBOSE, screen_only=True)
diff --git a/lib/ansible/plugins/callback/junit.py b/lib/ansible/plugins/callback/junit.py
new file mode 100644
index 0000000..75cdbc7
--- /dev/null
+++ b/lib/ansible/plugins/callback/junit.py
@@ -0,0 +1,364 @@
+# (c) 2016 Matt Clay <matt@mystile.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: junit
+ type: aggregate
+ short_description: write playbook output to a JUnit file.
+ version_added: historical
+ description:
+ - This callback writes playbook output to a JUnit formatted XML file.
+ - "Tasks show up in the report as follows:
+ 'ok': pass
+ 'failed' with 'EXPECTED FAILURE' in the task name: pass
+ 'failed' with 'TOGGLE RESULT' in the task name: pass
+ 'ok' with 'TOGGLE RESULT' in the task name: failure
+ 'failed' due to an exception: error
+ 'failed' for other reasons: failure
+ 'skipped': skipped"
+ options:
+ output_dir:
+ name: JUnit output dir
+ default: ~/.ansible.log
+ description: Directory to write XML files to.
+ env:
+ - name: JUNIT_OUTPUT_DIR
+ task_class:
+ name: JUnit Task class
+ default: False
+ description: Configure the output to be one class per yaml file
+ env:
+ - name: JUNIT_TASK_CLASS
+ task_relative_path:
+ name: JUnit Task relative path
+ default: none
+ description: Configure the output to use relative paths to given directory
+ version_added: "2.8"
+ env:
+ - name: JUNIT_TASK_RELATIVE_PATH
+ replace_out_of_tree_path:
+ name: Replace out of tree path
+ default: none
+ description: Replace the directory portion of an out-of-tree relative task path with the given placeholder
+ version_added: "2.12.3"
+ env:
+ - name: JUNIT_REPLACE_OUT_OF_TREE_PATH
+ fail_on_change:
+ name: JUnit fail on change
+ default: False
+ description: Consider any tasks reporting "changed" as a junit test failure
+ env:
+ - name: JUNIT_FAIL_ON_CHANGE
+ fail_on_ignore:
+ name: JUnit fail on ignore
+ default: False
+        description: Consider failed tasks as a junit test failure even if ignore_errors is set
+ env:
+ - name: JUNIT_FAIL_ON_IGNORE
+ include_setup_tasks_in_report:
+ name: JUnit include setup tasks in report
+ default: True
+ description: Should the setup tasks be included in the final report
+ env:
+ - name: JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT
+ hide_task_arguments:
+ name: Hide the arguments for a task
+ default: False
+ description: Hide the arguments for a task
+ version_added: "2.8"
+ env:
+ - name: JUNIT_HIDE_TASK_ARGUMENTS
+ test_case_prefix:
+ name: Prefix to find actual test cases
+ default: <empty>
+        description: Consider a task as a test case only if its name starts with this prefix. Additionally, failing tasks are recorded as failed test cases.
+ version_added: "2.8"
+ env:
+ - name: JUNIT_TEST_CASE_PREFIX
+ requirements:
+ - enable in configuration
+'''
+
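+# Illustrative (not part of the original change; paths assumed): enabling the
+# callback and redirecting the report for one run:
+#
+#   ANSIBLE_CALLBACKS_ENABLED=junit JUNIT_OUTPUT_DIR=/tmp/junit ansible-playbook site.yml
+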
+import os
+import time
+import re
+
+from ansible import constants as C
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.callback import CallbackBase
+from ansible.utils._junit_xml import (
+ TestCase,
+ TestError,
+ TestFailure,
+ TestSuite,
+ TestSuites,
+)
+
+
+class CallbackModule(CallbackBase):
+ """
+ This callback writes playbook output to a JUnit formatted XML file.
+
+ Tasks show up in the report as follows:
+ 'ok': pass
+ 'failed' with 'EXPECTED FAILURE' in the task name: pass
+ 'failed' with 'TOGGLE RESULT' in the task name: pass
+ 'ok' with 'TOGGLE RESULT' in the task name: failure
+ 'failed' due to an exception: error
+ 'failed' for other reasons: failure
+ 'skipped': skipped
+
+ This plugin makes use of the following environment variables:
+ JUNIT_OUTPUT_DIR (optional): Directory to write XML files to.
+ Default: ~/.ansible.log
+ JUNIT_TASK_CLASS (optional): Configure the output to be one class per yaml file
+ Default: False
+ JUNIT_TASK_RELATIVE_PATH (optional): Configure the output to use relative paths to given directory
+ Default: none
+ JUNIT_FAIL_ON_CHANGE (optional): Consider any tasks reporting "changed" as a junit test failure
+ Default: False
+    JUNIT_FAIL_ON_IGNORE (optional): Consider failed tasks as a junit test failure even if ignore_errors is set
+ Default: False
+ JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT (optional): Should the setup tasks be included in the final report
+ Default: True
+ JUNIT_HIDE_TASK_ARGUMENTS (optional): Hide the arguments for a task
+ Default: False
+    JUNIT_TEST_CASE_PREFIX (optional): Consider a task as a test case only if its name starts with this prefix. Additionally, failing tasks are
+                                       recorded as failed test cases.
+ Default: <empty>
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'junit'
+ CALLBACK_NEEDS_ENABLED = True
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ self._output_dir = os.getenv('JUNIT_OUTPUT_DIR', os.path.expanduser('~/.ansible.log'))
+ self._task_class = os.getenv('JUNIT_TASK_CLASS', 'False').lower()
+ self._task_relative_path = os.getenv('JUNIT_TASK_RELATIVE_PATH', '')
+ self._fail_on_change = os.getenv('JUNIT_FAIL_ON_CHANGE', 'False').lower()
+ self._fail_on_ignore = os.getenv('JUNIT_FAIL_ON_IGNORE', 'False').lower()
+ self._include_setup_tasks_in_report = os.getenv('JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT', 'True').lower()
+ self._hide_task_arguments = os.getenv('JUNIT_HIDE_TASK_ARGUMENTS', 'False').lower()
+ self._test_case_prefix = os.getenv('JUNIT_TEST_CASE_PREFIX', '')
+ self._replace_out_of_tree_path = os.getenv('JUNIT_REPLACE_OUT_OF_TREE_PATH', None)
+ self._playbook_path = None
+ self._playbook_name = None
+ self._play_name = None
+        self._task_data = {}
+
+        self.disabled = False
+
+ if self._replace_out_of_tree_path is not None:
+ self._replace_out_of_tree_path = to_text(self._replace_out_of_tree_path)
+
+ if not os.path.exists(self._output_dir):
+ os.makedirs(self._output_dir)
+
+ def _start_task(self, task):
+ """ record the start of a task for one or more hosts """
+
+ uuid = task._uuid
+
+ if uuid in self._task_data:
+ return
+
+ play = self._play_name
+ name = task.get_name().strip()
+ path = task.get_path()
+ action = task.action
+
+ if not task.no_log and self._hide_task_arguments == 'false':
+ args = ', '.join(('%s=%s' % a for a in task.args.items()))
+ if args:
+ name += ' ' + args
+
+ self._task_data[uuid] = TaskData(uuid, name, path, play, action)
+
+ def _finish_task(self, status, result):
+ """ record the results of a task for a single host """
+
+ task_uuid = result._task._uuid
+
+ if hasattr(result, '_host'):
+ host_uuid = result._host._uuid
+ host_name = result._host.name
+ else:
+ host_uuid = 'include'
+ host_name = 'include'
+
+ task_data = self._task_data[task_uuid]
+
+ if self._fail_on_change == 'true' and status == 'ok' and result._result.get('changed', False):
+ status = 'failed'
+
+ # ignore failure if expected and toggle result if asked for
+ if status == 'failed' and 'EXPECTED FAILURE' in task_data.name:
+ status = 'ok'
+ elif 'TOGGLE RESULT' in task_data.name:
+ if status == 'failed':
+ status = 'ok'
+ elif status == 'ok':
+ status = 'failed'
+
+ if task_data.name.startswith(self._test_case_prefix) or status == 'failed':
+ task_data.add_host(HostData(host_uuid, host_name, status, result))
+
+ def _build_test_case(self, task_data, host_data):
+ """ build a TestCase from the given TaskData and HostData """
+
+ name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
+ duration = host_data.finish - task_data.start
+
+ if self._task_relative_path and task_data.path:
+ junit_classname = to_text(os.path.relpath(to_bytes(task_data.path), to_bytes(self._task_relative_path)))
+ else:
+ junit_classname = task_data.path
+
+ if self._replace_out_of_tree_path is not None and junit_classname.startswith('../'):
+ junit_classname = self._replace_out_of_tree_path + to_text(os.path.basename(to_bytes(junit_classname)))
+
+ if self._task_class == 'true':
+ junit_classname = re.sub(r'\.yml:[0-9]+$', '', junit_classname)
+
+ if host_data.status == 'included':
+ return TestCase(name=name, classname=junit_classname, time=duration, system_out=str(host_data.result))
+
+ res = host_data.result._result
+ rc = res.get('rc', 0)
+ dump = self._dump_results(res, indent=0)
+ dump = self._cleanse_string(dump)
+
+ if host_data.status == 'ok':
+ return TestCase(name=name, classname=junit_classname, time=duration, system_out=dump)
+
+ test_case = TestCase(name=name, classname=junit_classname, time=duration)
+
+ if host_data.status == 'failed':
+ if 'exception' in res:
+ message = res['exception'].strip().split('\n')[-1]
+ output = res['exception']
+ test_case.errors.append(TestError(message=message, output=output))
+ elif 'msg' in res:
+ message = res['msg']
+ test_case.failures.append(TestFailure(message=message, output=dump))
+ else:
+ test_case.failures.append(TestFailure(message='rc=%s' % rc, output=dump))
+ elif host_data.status == 'skipped':
+ if 'skip_reason' in res:
+ message = res['skip_reason']
+ else:
+ message = 'skipped'
+ test_case.skipped = message
+
+ return test_case
+
+ def _cleanse_string(self, value):
+ """ convert surrogate escapes to the unicode replacement character to avoid XML encoding errors """
+ return to_text(to_bytes(value, errors='surrogateescape'), errors='replace')
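+    # Illustrative (values assumed): a result containing the lone surrogate
+    # u'\udce9' cannot be serialized to XML; _cleanse_string turns it into the
+    # replacement character u'\ufffd' so report generation does not fail.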
+
+ def _generate_report(self):
+ """ generate a TestSuite report from the collected TaskData and HostData """
+
+ test_cases = []
+
+ for task_uuid, task_data in self._task_data.items():
+ if task_data.action in C._ACTION_SETUP and self._include_setup_tasks_in_report == 'false':
+ continue
+
+ for host_uuid, host_data in task_data.host_data.items():
+ test_cases.append(self._build_test_case(task_data, host_data))
+
+ test_suite = TestSuite(name=self._playbook_name, cases=test_cases)
+ test_suites = TestSuites(suites=[test_suite])
+ report = test_suites.to_pretty_xml()
+
+ output_file = os.path.join(self._output_dir, '%s-%s.xml' % (self._playbook_name, time.time()))
+
+ with open(output_file, 'wb') as xml:
+ xml.write(to_bytes(report, errors='surrogate_or_strict'))
+
+ def v2_playbook_on_start(self, playbook):
+ self._playbook_path = playbook._file_name
+ self._playbook_name = os.path.splitext(os.path.basename(self._playbook_path))[0]
+
+ def v2_playbook_on_play_start(self, play):
+ self._play_name = play.get_name()
+
+ def v2_runner_on_no_hosts(self, task):
+ self._start_task(task)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._start_task(task)
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ self._start_task(task)
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._start_task(task)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if ignore_errors and self._fail_on_ignore != 'true':
+ self._finish_task('ok', result)
+ else:
+ self._finish_task('failed', result)
+
+ def v2_runner_on_ok(self, result):
+ self._finish_task('ok', result)
+
+ def v2_runner_on_skipped(self, result):
+ self._finish_task('skipped', result)
+
+ def v2_playbook_on_include(self, included_file):
+ self._finish_task('included', included_file)
+
+ def v2_playbook_on_stats(self, stats):
+ self._generate_report()
+
+
+class TaskData:
+ """
+ Data about an individual task.
+ """
+
+ def __init__(self, uuid, name, path, play, action):
+ self.uuid = uuid
+ self.name = name
+ self.path = path
+ self.play = play
+        self.start = time.time()
+        self.host_data = {}
+        self.action = action
+
+ def add_host(self, host):
+ if host.uuid in self.host_data:
+ if host.status == 'included':
+ # concatenate task include output from multiple items
+ host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
+ else:
+ raise Exception('%s: %s: %s: duplicate host callback: %s' % (self.path, self.play, self.name, host.name))
+
+ self.host_data[host.uuid] = host
+
+
+class HostData:
+ """
+ Data about an individual host.
+ """
+
+ def __init__(self, uuid, name, status, result):
+ self.uuid = uuid
+ self.name = name
+ self.status = status
+ self.result = result
+ self.finish = time.time()
diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py
new file mode 100644
index 0000000..c4d713f
--- /dev/null
+++ b/lib/ansible/plugins/callback/minimal.py
@@ -0,0 +1,80 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: minimal
+ type: stdout
+ short_description: minimal Ansible screen output
+ version_added: historical
+ description:
+ - This is the default output callback used by the ansible command (ad-hoc)
+ extends_documentation_fragment:
+ - result_format_callback
+'''
+
+from ansible.plugins.callback import CallbackBase
+from ansible import constants as C
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'minimal'
+
+ def _command_generic_msg(self, host, result, caption):
+ ''' output the result of a command run '''
+
+ buf = "%s | %s | rc=%s >>\n" % (host, caption, result.get('rc', -1))
+ buf += result.get('stdout', '')
+ buf += result.get('stderr', '')
+ buf += result.get('msg', '')
+
+ return buf + "\n"
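+
+    # Illustrative output (host and result assumed):
+    #   web1 | FAILED | rc=2 >>
+    #   ls: cannot access '/nope': No such file or directory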
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+
+ self._handle_exception(result._result)
+ self._handle_warnings(result._result)
+
+ if result._task.action in C.MODULE_NO_JSON and 'module_stderr' not in result._result:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color=C.COLOR_ERROR)
+ else:
+ self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_ERROR)
+
+ def v2_runner_on_ok(self, result):
+ self._clean_results(result._result, result._task.action)
+
+ self._handle_warnings(result._result)
+
+ if result._result.get('changed', False):
+ color = C.COLOR_CHANGED
+ state = 'CHANGED'
+ else:
+ color = C.COLOR_OK
+ state = 'SUCCESS'
+
+ if result._task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result._result:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result, state), color=color)
+ else:
+ self._display.display("%s | %s => %s" % (result._host.get_name(), state, self._dump_results(result._result, indent=4)), color=color)
+
+ def v2_runner_on_skipped(self, result):
+ self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
+
+ def v2_runner_on_unreachable(self, result):
+ self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_UNREACHABLE)
+
+ def v2_on_file_diff(self, result):
+ if 'diff' in result._result and result._result['diff']:
+ self._display.display(self._get_diff(result._result['diff']))
diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py
new file mode 100644
index 0000000..fd51b27
--- /dev/null
+++ b/lib/ansible/plugins/callback/oneline.py
@@ -0,0 +1,77 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: oneline
+ type: stdout
+ short_description: oneline Ansible screen output
+ version_added: historical
+ description:
+ - This is the output callback used by the -o/--one-line command line option.
+'''
+
+from ansible.plugins.callback import CallbackBase
+from ansible import constants as C
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'oneline'
+
+ def _command_generic_msg(self, hostname, result, caption):
+ stdout = result.get('stdout', '').replace('\n', '\\n').replace('\r', '\\r')
+ if 'stderr' in result and result['stderr']:
+ stderr = result.get('stderr', '').replace('\n', '\\n').replace('\r', '\\r')
+ return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, result.get('rc', -1), stdout, stderr)
+ else:
+ return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, result.get('rc', -1), stdout)
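+
+    # Illustrative output (values assumed); embedded newlines are escaped so
+    # the whole result stays on one line:
+    #   web1 | SUCCESS | rc=0 | (stdout) hello\nworld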
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n', '')
+
+ if result._task.action in C.MODULE_NO_JSON and 'module_stderr' not in result._result:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result, 'FAILED'), color=C.COLOR_ERROR)
+ else:
+ self._display.display(msg, color=C.COLOR_ERROR)
+
+ self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n', '')),
+ color=C.COLOR_ERROR)
+
+ def v2_runner_on_ok(self, result):
+
+ if result._result.get('changed', False):
+ color = C.COLOR_CHANGED
+ state = 'CHANGED'
+ else:
+ color = C.COLOR_OK
+ state = 'SUCCESS'
+
+ if result._task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result._result:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result, state), color=color)
+ else:
+ self._display.display("%s | %s => %s" % (result._host.get_name(), state, self._dump_results(result._result, indent=0).replace('\n', '')),
+ color=color)
+
+ def v2_runner_on_unreachable(self, result):
+ self._display.display("%s | UNREACHABLE!: %s" % (result._host.get_name(), result._result.get('msg', '')), color=C.COLOR_UNREACHABLE)
+
+ def v2_runner_on_skipped(self, result):
+ self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
diff --git a/lib/ansible/plugins/callback/tree.py b/lib/ansible/plugins/callback/tree.py
new file mode 100644
index 0000000..a9f65d2
--- /dev/null
+++ b/lib/ansible/plugins/callback/tree.py
@@ -0,0 +1,86 @@
+# (c) 2012-2014, Ansible, Inc
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: tree
+ type: notification
+ requirements:
+ - invoked in the command line
+ short_description: Save host events to files
+ version_added: "2.0"
+ options:
+ directory:
+ version_added: '2.11'
+ description: directory that will contain the per host JSON files. Also set by the C(--tree) option when using adhoc.
+ ini:
+ - section: callback_tree
+ key: directory
+ env:
+ - name: ANSIBLE_CALLBACK_TREE_DIR
+ default: "~/.ansible/tree"
+ type: path
+ description:
+ - "This callback is used by the Ansible (adhoc) command line option C(-t|--tree)."
+        - This produces a JSON dump of events in a directory, with one file per host; the directory used MUST be passed as a command line option.
+'''
+
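+# Illustrative usage (paths assumed; not part of the original change): with
+# ad-hoc commands,
+#   ansible all -m ping --tree /tmp/tree
+# writes one JSON file per host under /tmp/tree.
+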
+import os
+
+from ansible.constants import TREE_DIR
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.callback import CallbackBase
+from ansible.utils.path import makedirs_safe, unfrackpath
+
+
+class CallbackModule(CallbackBase):
+ '''
+ This callback puts results into a host specific file in a directory in json format.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'tree'
+ CALLBACK_NEEDS_ENABLED = True
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ ''' override to set self.tree '''
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ if TREE_DIR:
+ # TREE_DIR comes from the CLI option --tree, only available for adhoc
+ self.tree = unfrackpath(TREE_DIR)
+ else:
+ self.tree = self.get_option('directory')
+
+ def write_tree_file(self, hostname, buf):
+ ''' write something into treedir/hostname '''
+
+ buf = to_bytes(buf)
+ try:
+ makedirs_safe(self.tree)
+ except (OSError, IOError) as e:
+ self._display.warning(u"Unable to access or create the configured directory (%s): %s" % (to_text(self.tree), to_text(e)))
+
+ try:
+ path = to_bytes(os.path.join(self.tree, hostname))
+ with open(path, 'wb+') as fd:
+ fd.write(buf)
+ except (OSError, IOError) as e:
+ self._display.warning(u"Unable to write to %s's file: %s" % (hostname, to_text(e)))
+
+ def result_to_tree(self, result):
+ self.write_tree_file(result._host.get_name(), self._dump_results(result._result))
+
+ def v2_runner_on_ok(self, result):
+ self.result_to_tree(result)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self.result_to_tree(result)
+
+ def v2_runner_on_unreachable(self, result):
+ self.result_to_tree(result)
diff --git a/lib/ansible/plugins/cliconf/__init__.py b/lib/ansible/plugins/cliconf/__init__.py
new file mode 100644
index 0000000..be0f23e
--- /dev/null
+++ b/lib/ansible/plugins/cliconf/__init__.py
@@ -0,0 +1,477 @@
+#
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import abstractmethod
+from functools import wraps
+
+from ansible.plugins import AnsiblePlugin
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+ from scp import SCPClient
+ HAS_SCP = True
+except ImportError:
+ HAS_SCP = False
+
+
+def enable_mode(func):
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ prompt = self._connection.get_prompt()
+ if not to_text(prompt, errors='surrogate_or_strict').strip().endswith('#'):
+ raise AnsibleError('operation requires privilege escalation')
+ return func(self, *args, **kwargs)
+ return wrapped
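+
+# Illustrative usage (assumed subclass; not part of the original change):
+#
+#   class ExampleCliconf(CliconfBase):
+#       @enable_mode
+#       def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None):
+#           ...  # raises AnsibleError unless the prompt ends with '#'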
+
+
+class CliconfBase(AnsiblePlugin):
+ """
+ A base class for implementing cli connections
+
+ .. note:: String inputs to :meth:`send_command` will be cast to byte strings
+ within this method and as such are not required to be made byte strings
+ beforehand. Please avoid using literal byte strings (``b'string'``) in
+        :class:`CliconfBase` plugins as this can lead to unexpected errors when
+        running on Python 3.
+
+    List of supported RPCs:
+ :get_config: Retrieves the specified configuration from the device
+ :edit_config: Loads the specified commands into the remote device
+ :get: Execute specified command on remote device
+ :get_capabilities: Retrieves device information and supported rpc methods
+ :commit: Load configuration from candidate to running
+ :discard_changes: Discard changes to candidate datastore
+
+    Note: The list of RPCs supported by the remote device can be extracted from
+    the output of get_capabilities().
+
+ :returns: Returns output received from remote device as byte string
+
+ Usage:
+ from ansible.module_utils.connection import Connection
+
+ conn = Connection()
+ conn.get('show lldp neighbors detail')
+ conn.get_config('running')
+ conn.edit_config(['hostname test', 'netconf ssh'])
+ """
+
+ __rpc__ = ['get_config', 'edit_config', 'get_capabilities', 'get', 'enable_response_logging', 'disable_response_logging']
+
+ def __init__(self, connection):
+ super(CliconfBase, self).__init__()
+ self._connection = connection
+ self.history = list()
+ self.response_logging = False
+
+ def _alarm_handler(self, signum, frame):
+ """Alarm handler raised in case of command timeout """
+ self._connection.queue_message('log', 'closing shell due to command timeout (%s seconds).' % self._connection._play_context.timeout)
+ self.close()
+
+ def send_command(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, prompt_retry_check=False, check_all=False):
+ """Executes a command over the device connection
+
+ This method will execute a command over the device connection and
+        return the results to the caller. The command and its response are also
+        recorded in the history, redacted unless response logging is enabled.
+
+ :param command: The command to send over the connection to the device
+ :param prompt: A single regex pattern or a sequence of patterns to evaluate the expected prompt from the command
+ :param answer: The answer to respond with if the prompt is matched.
+ :param sendonly: Bool value that will send the command but not wait for a result.
+ :param newline: Bool value that will append the newline character to the command
+ :param prompt_retry_check: Bool value for trying to detect more prompts
+ :param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
+ given prompt.
+ :returns: The output from the device after executing the command
+ """
+ kwargs = {
+ 'command': to_bytes(command),
+ 'sendonly': sendonly,
+ 'newline': newline,
+ 'prompt_retry_check': prompt_retry_check,
+ 'check_all': check_all
+ }
+
+ if prompt is not None:
+ if isinstance(prompt, list):
+ kwargs['prompt'] = [to_bytes(p) for p in prompt]
+ else:
+ kwargs['prompt'] = to_bytes(prompt)
+ if answer is not None:
+ if isinstance(answer, list):
+ kwargs['answer'] = [to_bytes(p) for p in answer]
+ else:
+ kwargs['answer'] = to_bytes(answer)
+
+ resp = self._connection.send(**kwargs)
+
+ if not self.response_logging:
+ self.history.append(('*****', '*****'))
+ else:
+ self.history.append((kwargs['command'], resp))
+
+ return resp
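+
+    # Illustrative calls (commands and prompts assumed):
+    #   conn.send_command(command='show version')
+    #   conn.send_command(command='reload', prompt=[r'\[confirm\]'], answer='y')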
+
+ def get_base_rpc(self):
+ """Returns list of base rpc method supported by remote device"""
+ return self.__rpc__
+
+ def get_history(self):
+        """ Returns the history of all run commands
+
+ This will return a log of all the commands that have been sent to
+ the device and all of the output received. By default, all commands
+ and output will be redacted unless explicitly configured otherwise.
+
+ :return: An ordered list of command, output pairs
+ """
+ return self.history
+
+ def reset_history(self):
+ """ Resets the history of run commands
+ :return: None
+ """
+ self.history = list()
+
+ def enable_response_logging(self):
+ """Enable logging command response"""
+ self.response_logging = True
+
+ def disable_response_logging(self):
+ """Disable logging command response"""
+ self.response_logging = False
+
+ @abstractmethod
+ def get_config(self, source='running', flags=None, format=None):
+ """Retrieves the specified configuration from the device
+
+ This method will retrieve the configuration specified by source and
+ return it to the caller as a string. Subsequent calls to this method
+ will retrieve a new configuration from the device
+
+ :param source: The configuration source to return from the device.
+ This argument accepts either `running` or `startup` as valid values.
+
+ :param flags: For devices that support configuration filtering, this
+ keyword argument is used to filter the returned configuration.
+ The use of this keyword argument is device dependent and will be
+ silently ignored on devices that do not support it.
+
+ :param format: For devices that support fetching different configuration
+ format, this keyword argument is used to specify the format in which
+ configuration is to be retrieved.
+
+ :return: The device configuration as specified by the source argument.
+ """
+ pass
+
+ @abstractmethod
+ def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None):
+ """Loads the candidate configuration into the network device
+
+ This method will load the specified candidate config into the device
+ and merge with the current configuration unless replace is set to
+        True. If the device does not support config replace, an error is
+        returned.
+
+ :param candidate: The configuration to load into the device and merge
+ with the current running configuration
+
+ :param commit: Boolean value that indicates if the device candidate
+ configuration should be pushed in the running configuration or discarded.
+
+        :param replace: If True/False, indicates whether the running configuration should be completely
+                        replaced by the candidate configuration. It can also take a configuration file path
+                        as its value; in that case the file must already be present on the remote host at
+                        the given path.
+        :param comment: Commit comment, provided it is supported by the remote host
+        :return: Returns a json string which contains the configuration applied on the remote host, the
+                 responses returned on executing the configuration commands and platform relevant data.
+ {
+ "diff": "",
+ "response": [],
+ "request": []
+ }
+
+ """
+ pass
+
+ @abstractmethod
+ def get(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, output=None, check_all=False):
+ """Execute specified command on remote device
+ This method will retrieve the specified data and
+ return it to the caller as a string.
+ :param command: command in string format to be executed on remote device
+ :param prompt: the expected prompt generated by executing command, this can
+ be a string or a list of strings
+ :param answer: the string to respond to the prompt with
+ :param sendonly: bool to disable waiting for response, default is false
+ :param newline: bool to indicate if newline should be added at end of answer or not
+ :param output: For devices that support fetching command output in different
+ format, this keyword argument is used to specify the output in which
+ response is to be retrieved.
+ :param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
+ given prompt.
+ :return: The output from the device after executing the command
+ """
+ pass
+
+ @abstractmethod
+ def get_capabilities(self):
+ """Returns the basic capabilities of the network device
+ This method will provide some basic facts about the device and
+ what capabilities it has to modify the configuration. The minimum
+ return from this method takes the following format.
+ eg:
+ {
+
+ 'rpc': [list of supported rpcs],
+ 'network_api': <str>, # the name of the transport
+ 'device_info': {
+ 'network_os': <str>,
+ 'network_os_version': <str>,
+ 'network_os_model': <str>,
+ 'network_os_hostname': <str>,
+ 'network_os_image': <str>,
+ 'network_os_platform': <str>,
+ },
+ 'device_operations': {
+                'supports_diff_replace': <bool>, # identify if config merge or replace via diff is supported
+ 'supports_commit': <bool>, # identify if commit is supported by device or not
+ 'supports_rollback': <bool>, # identify if rollback is supported or not
+ 'supports_defaults': <bool>, # identify if fetching running config with default is supported
+                'supports_commit_comment': <bool>, # identify if adding comment to commit is supported or not
+ 'supports_onbox_diff': <bool>, # identify if on box diff capability is supported or not
+ 'supports_generate_diff': <bool>, # identify if diff capability is supported within plugin
+                'supports_multiline_delimiter': <bool>, # identify if multiline delimiter is supported within config
+ 'supports_diff_match': <bool>, # identify if match is supported
+ 'supports_diff_ignore_lines': <bool>, # identify if ignore line in diff is supported
+ 'supports_config_replace': <bool>, # identify if running config replace with candidate config is supported
+ 'supports_admin': <bool>, # identify if admin configure mode is supported or not
+ 'supports_commit_label': <bool>, # identify if commit label is supported or not
+ }
+ 'format': [list of supported configuration format],
+ 'diff_match': [list of supported match values],
+ 'diff_replace': [list of supported replace values],
+ 'output': [list of supported command output format]
+ }
+ :return: capability as json string
+ """
+ result = {}
+ result['rpc'] = self.get_base_rpc()
+ result['device_info'] = self.get_device_info()
+ result['network_api'] = 'cliconf'
+ return result
+
+ @abstractmethod
+ def get_device_info(self):
+ """Returns basic information about the network device.
+
+ This method will provide basic information about the device such as OS version and model
+ name. This data is expected to be used to fill the 'device_info' key in get_capabilities()
+ above.
+
+ :return: dictionary of device information
+ """
+ pass
+
+ def commit(self, comment=None):
+ """Commit configuration changes
+
+ This method will perform the commit operation on a previously loaded
+ candidate configuration that was loaded using `edit_config()`. If
+ there is a candidate configuration, it will be committed to the
+ active configuration. If there is not a candidate configuration, this
+ method should just silently return.
+
+ :return: None
+ """
+ return self._connection.method_not_found("commit is not supported by network_os %s" % self._play_context.network_os)
+
+ def discard_changes(self):
+ """Discard candidate configuration
+
+ This method will discard the current candidate configuration if one
+ is present. If there is no candidate configuration currently loaded,
+ then this method should just silently return
+
+ :returns: None
+ """
+ return self._connection.method_not_found("discard_changes is not supported by network_os %s" % self._play_context.network_os)
+
+ def rollback(self, rollback_id, commit=True):
+ """
+
+        :param rollback_id: The commit id to which the configuration should be rolled back
+ :param commit: Flag to indicate if changes should be committed or not
+ :return: Returns diff between before and after change.
+ """
+ pass
+
+ def copy_file(self, source=None, destination=None, proto='scp', timeout=30):
+ """Copies file over scp/sftp to remote device
+
+ :param source: Source file path
+ :param destination: Destination file path on remote device
+ :param proto: Protocol to be used for file transfer,
+ supported protocol: scp and sftp
+ :param timeout: Specifies the wait time to receive response from
+ remote host before triggering timeout exception
+ :return: None
+ """
+ ssh = self._connection.paramiko_conn._connect_uncached()
+ if proto == 'scp':
+ if not HAS_SCP:
+ raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
+ with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
+ out = scp.put(source, destination)
+ elif proto == 'sftp':
+ with ssh.open_sftp() as sftp:
+ sftp.put(source, destination)
+
+ def get_file(self, source=None, destination=None, proto='scp', timeout=30):
+ """Fetch file over scp/sftp from remote device
+ :param source: Source file path
+ :param destination: Destination file path
+ :param proto: Protocol to be used for file transfer,
+ supported protocol: scp and sftp
+ :param timeout: Specifies the wait time to receive response from
+ remote host before triggering timeout exception
+ :return: None
+ """
+ ssh = self._connection.paramiko_conn._connect_uncached()
+ if proto == 'scp':
+ if not HAS_SCP:
+ raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
+ try:
+ with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
+ scp.get(source, destination)
+ except EOFError:
+ # This appears to be benign.
+ pass
+ elif proto == 'sftp':
+ with ssh.open_sftp() as sftp:
+ sftp.get(source, destination)
+
+ def get_diff(self, candidate=None, running=None, diff_match=None, diff_ignore_lines=None, path=None, diff_replace=None):
+ """
+        Generate a diff between the candidate and running configurations. If the
+        remote host supports on-box diff (i.e. supports_onbox_diff), the candidate
+        and running configurations are not required to be passed as arguments.
+        If on-box diff is not supported, the candidate argument is mandatory and
+        the running argument is optional.
+ :param candidate: The configuration which is expected to be present on remote host.
+ :param running: The base configuration which is used to generate diff.
+ :param diff_match: Instructs how to match the candidate configuration with current device configuration
+ Valid values are 'line', 'strict', 'exact', 'none'.
+ 'line' - commands are matched line by line
+ 'strict' - command lines are matched with respect to position
+ 'exact' - command lines must be an equal match
+ 'none' - will not compare the candidate configuration with the running configuration
+ :param diff_ignore_lines: Use this argument to specify one or more lines that should be
+ ignored during the diff. This is used for lines in the configuration
+ that are automatically updated by the system. This argument takes
+ a list of regular expressions or exact line matches.
+ :param path: The ordered set of parents that uniquely identify the section or hierarchy
+ the commands should be checked against. If the parents argument
+ is omitted, the commands are checked against the set of top
+ level or global commands.
+ :param diff_replace: Instructs on the way to perform the configuration on the device.
+ If the replace argument is set to I(line) then the modified lines are
+ pushed to the device in configuration mode. If the replace argument is
+ set to I(block) then the entire command block is pushed to the device in
+ configuration mode if any line is not correct.
+ :return: Configuration and/or banner diff in json format.
+ {
+ 'config_diff': ''
+ }
+
+ """
+ pass
+
+ def run_commands(self, commands=None, check_rc=True):
+ """
+ Execute a list of commands on remote host and return the list of response
+ :param commands: The list of command that needs to be executed on remote host.
+ The individual command in list can either be a command string or command dict.
+ If the command is dict the valid keys are
+ {
+ 'command': <command to be executed>
+ 'prompt': <expected prompt on executing the command>,
+ 'answer': <answer for the prompt>,
+ 'output': <the format in which command output should be rendered eg: 'json', 'text'>,
+                'sendonly': <Boolean flag to indicate if the command execution response should be ignored or not>
+ }
+        :param check_rc: Boolean flag to indicate whether the returned responses should be checked for errors.
+                         If check_rc is False, error output is appended to the returned response list; if it
+                         is True, an exception is raised instead.
+ :return: List of returned response
+ """
+ pass
+
+ def check_edit_config_capability(self, operations, candidate=None, commit=True, replace=None, comment=None):
+
+ if not candidate and not replace:
+ raise ValueError("must provide a candidate or replace to load configuration")
+
+ if commit not in (True, False):
+ raise ValueError("'commit' must be a bool, got %s" % commit)
+
+        if replace and not operations.get('supports_replace', False):
+            raise ValueError("configuration replace is not supported")
+
+        if comment and not operations.get('supports_commit_comment', False):
+            raise ValueError("commit comment is not supported")
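+
+        # Illustrative call (operations mapping assumed, typically taken from
+        # get_capabilities()['device_operations']):
+        #   ops = {'supports_replace': True, 'supports_commit_comment': False}
+        #   self.check_edit_config_capability(ops, candidate='hostname r1')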
+
+ def set_cli_prompt_context(self):
+ """
+ Ensure the command prompt on device is in right mode
+ :return: None
+ """
+ pass
+
+ def _update_cli_prompt_context(self, config_context=None, exit_command='exit'):
+ """
+ Update the cli prompt context to ensure it is in operational mode
+        :param config_context: A string used to identify whether the current cli prompt ends with the config mode prompt
+ :param exit_command: Command to execute to exit the config mode
+ :return: None
+ """
+ out = self._connection.get_prompt()
+ if out is None:
+ raise AnsibleConnectionFailure(message=u'cli prompt is not identified from the last received'
+ u' response window: %s' % self._connection._last_recv_window)
+
+ while True:
+ out = to_text(out, errors='surrogate_then_replace').strip()
+ if config_context and out.endswith(config_context):
+ self._connection.queue_message('vvvv', 'wrong context, sending exit to device')
+ self.send_command(exit_command)
+ out = self._connection.get_prompt()
+ else:
+ break
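+
+    # Illustrative flow (prompt strings assumed): with config_context=')#' and
+    # a device prompt of 'router(config)#', one exit_command is sent and the
+    # prompt is re-checked, until it no longer ends with ')#'.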
diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py
new file mode 100644
index 0000000..daa683c
--- /dev/null
+++ b/lib/ansible/plugins/connection/__init__.py
@@ -0,0 +1,382 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017, Peter Sprygada <psprygad@redhat.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import fcntl
+import os
+import shlex
+import typing as t
+
+from abc import abstractmethod
+from functools import wraps
+
+from ansible import constants as C
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins import AnsiblePlugin
+from ansible.utils.display import Display
+from ansible.plugins.loader import connection_loader, get_shell_plugin
+from ansible.utils.path import unfrackpath
+
+display = Display()
+
+
+__all__ = ['ConnectionBase', 'ensure_connect']
+
+BUFSIZE = 65536
+
+
+def ensure_connect(func):
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ if not self._connected:
+ self._connect()
+ return func(self, *args, **kwargs)
+ return wrapped
+
+
+class ConnectionBase(AnsiblePlugin):
+ '''
+ A base class for connections to contain common code.
+ '''
+
+ has_pipelining = False
+ has_native_async = False # eg, winrm
+ always_pipeline_modules = False # eg, winrm
+ has_tty = True # for interacting with become plugins
+ # When running over this connection type, prefer modules written in a certain language
+ # as discovered by the specified file extension. An empty string as the
+ # language means any language.
+ module_implementation_preferences = ('',) # type: t.Iterable[str]
+ allow_executable = True
+
+ # the following control whether or not the connection supports the
+ # persistent connection framework or not
+ supports_persistence = False
+ force_persistence = False
+
+ default_user = None
+
+ def __init__(self, play_context, new_stdin, shell=None, *args, **kwargs):
+
+ super(ConnectionBase, self).__init__()
+
+ # All these hasattrs allow subclasses to override these parameters
+ if not hasattr(self, '_play_context'):
+ # Backwards compat: self._play_context isn't really needed, using set_options/get_option
+ self._play_context = play_context
+ if not hasattr(self, '_new_stdin'):
+ self._new_stdin = new_stdin
+ if not hasattr(self, '_display'):
+ # Backwards compat: self._display isn't really needed, just import the global display and use that.
+ self._display = display
+ if not hasattr(self, '_connected'):
+ self._connected = False
+
+ self.success_key = None
+ self.prompt = None
+ self._connected = False
+ self._socket_path = None
+
+ # helper plugins
+ self._shell = shell
+
+ # we always must have shell
+ if not self._shell:
+ shell_type = play_context.shell if play_context.shell else getattr(self, '_shell_type', None)
+ self._shell = get_shell_plugin(shell_type=shell_type, executable=self._play_context.executable)
+
+ self.become = None
+
+ def set_become_plugin(self, plugin):
+ self.become = plugin
+
+ @property
+ def connected(self):
+ '''Read-only property holding whether the connection to the remote host is active or closed.'''
+ return self._connected
+
+ @property
+ def socket_path(self):
+ '''Read-only property holding the connection socket path for this remote host'''
+ return self._socket_path
+
+ @staticmethod
+ def _split_ssh_args(argstring):
+ """
+ Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
+ list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
+ the argument list. The list will not contain any empty elements.
+ """
+        # In Python 3, shlex.split doesn't work on a byte string.
+ return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()]
+
+ @property
+ @abstractmethod
+ def transport(self):
+ """String used to identify this Connection class from other classes"""
+ pass
+
+ @abstractmethod
+ def _connect(self):
+ """Connect to the host we've been initialized with"""
+
+ @ensure_connect
+ @abstractmethod
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ """Run a command on the remote host.
+
+ :arg cmd: byte string containing the command
+ :kwarg in_data: If set, this data is passed to the command's stdin.
+ This is used to implement pipelining. Currently not all
+ connection plugins implement pipelining.
+ :kwarg sudoable: Tell the connection plugin if we're executing
+ a command via a privilege escalation mechanism. This may affect
+ how the connection plugin returns data. Note that not all
+ connections can handle privilege escalation.
+ :returns: a tuple of (return code, stdout, stderr) The return code is
+ an int while stdout and stderr are both byte strings.
+
+ When a command is executed, it goes through multiple commands to get
+ there. It looks approximately like this::
+
+ [LocalShell] ConnectionCommand [UsersLoginShell (*)] ANSIBLE_SHELL_EXECUTABLE [(BecomeCommand ANSIBLE_SHELL_EXECUTABLE)] Command
+ :LocalShell: Is optional. It is run locally to invoke the
+ ``ConnectionCommand``. In most instances the ``ConnectionCommand``
+ can be invoked directly instead; the sole known exception is the
+ ssh connection plugin, whose ``ssh_args`` may contain values that
+ need to be expanded locally. Shell metacharacters in the command
+ itself should be processed on the remote machine, not the local
+ one, so no local shell is otherwise needed. (Example: ``/bin/sh``)
+ :ConnectionCommand: This is the command that connects us to the remote
+ machine to run the rest of the command. ``ansible_user``,
+ ``ansible_ssh_host`` and so forth are fed to this piece of the
+ command to connect to the correct host (Examples: ``ssh``,
+ ``chroot``)
+ :UsersLoginShell: This shell may or may not be created depending on
+ the ConnectionCommand used by the connection plugin. This is the
+ shell that the ``ansible_user`` has configured as their login
+ shell. In traditional UNIX parlance, this is the last field of
+ a user's ``/etc/passwd`` entry. We do not specifically try to run
+ the ``UsersLoginShell`` when we connect. Instead it is implicit
+ in the actions that the ``ConnectionCommand`` takes when it
+ connects to a remote machine. ``ansible_shell_type`` may be set
+ to inform ansible of differences in how the ``UsersLoginShell``
+ handles things like quoting if a shell has different semantics
+ than the Bourne shell.
+ :ANSIBLE_SHELL_EXECUTABLE: This is the shell set via the inventory var
+ ``ansible_shell_executable`` or via
+ ``constants.DEFAULT_EXECUTABLE`` if the inventory var is not set.
+ We explicitly invoke this shell so that we have predictable
+ quoting rules at this point. ``ANSIBLE_SHELL_EXECUTABLE`` is only
+ settable by the user because some sudo setups may only allow
+ invoking a specific shell. (For instance, ``/bin/bash`` may be
+ allowed but ``/bin/sh``, our default, may not). We invoke this
+ twice, once after the ``ConnectionCommand`` and once after the
+ ``BecomeCommand``. After the ConnectionCommand, this is run by
+ the ``UsersLoginShell``. After the ``BecomeCommand`` we specify
+ that the ``ANSIBLE_SHELL_EXECUTABLE`` is being invoked directly.
+ :BecomeCommand ANSIBLE_SHELL_EXECUTABLE: Is the command that performs
+ privilege escalation. Setting this up is performed by the action
+ plugin prior to running ``exec_command``. So we just get passed
+ :param:`cmd` which has the BecomeCommand already added.
+ (Examples: sudo, su) If we have a BecomeCommand then we will
+ invoke an ``ANSIBLE_SHELL_EXECUTABLE`` shell inside of it so that
+ we have a consistent view of quoting.
+ :Command: Is the command we're actually trying to run remotely.
+ (Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file)
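+
+ An illustrative (not byte-exact) expansion for an ssh connection
+ with sudo might look like::
+
+ ssh user@host "/bin/sh -c 'sudo /bin/sh -c \"mkdir -p $HOME/.ansible\"'"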
+ """
+ pass
+
+ @ensure_connect
+ @abstractmethod
+ def put_file(self, in_path, out_path):
+ """Transfer a file from local to remote"""
+ pass
+
+ @ensure_connect
+ @abstractmethod
+ def fetch_file(self, in_path, out_path):
+ """Fetch a file from remote to local; callers are expected to have pre-created the directory chain for out_path"""
+ pass
+
+ @abstractmethod
+ def close(self):
+ """Terminate the connection"""
+ pass
+
+ def connection_lock(self):
+ f = self._play_context.connection_lockfd
+ display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
+ fcntl.lockf(f, fcntl.LOCK_EX)
+ display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
+
+ def connection_unlock(self):
+ f = self._play_context.connection_lockfd
+ fcntl.lockf(f, fcntl.LOCK_UN)
+ display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
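+
+ # These locks serialise interactive prompts across forked workers; for
+ # example, the paramiko plugin wraps its host-key confirmation prompt in
+ # connection_lock()/connection_unlock().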
+
+ def reset(self):
+ display.warning("Reset is not implemented for this connection")
+
+ def update_vars(self, variables):
+ '''
+ Adds connection-related 'magic' variables to the variable dictionary
+ provided, in case users need to access them from the play. This is a
+ legacy from runner.
+ '''
+ for varname in C.COMMON_CONNECTION_VARS:
+ value = None
+ if varname in variables:
+ # don't update existing values
+ continue
+ elif 'password' in varname or 'passwd' in varname:
+ # no secrets!
+ continue
+ elif varname == 'ansible_connection':
+ # its me mom!
+ value = self._load_name
+ elif varname == 'ansible_shell_type':
+ # its my cousin ...
+ value = self._shell._load_name
+ else:
+ # deal with generic options if the plugin supports them (for example, not all connections have a remote user)
+ options = C.config.get_plugin_options_from_var('connection', self._load_name, varname)
+ if options:
+ value = self.get_option(options[0]) # for these variables there should be only one option
+ elif 'become' not in varname:
+ # fall back to play_context, unless become related. TODO: in the end this should come from task/play and not pc
+ for prop, var_list in C.MAGIC_VARIABLE_MAPPING.items():
+ if varname in var_list:
+ try:
+ value = getattr(self._play_context, prop)
+ break
+ except AttributeError:
+ # it was not defined, fine to ignore
+ continue
+
+ if value is not None:
+ display.debug('Set connection var {0} to {1}'.format(varname, value))
+ variables[varname] = value
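+
+ # Illustrative outcome: for an ssh connection this may add
+ # ansible_connection='ssh' plus ansible_user/ansible_port values drawn
+ # from the plugin's options, while skipping any password/passwd variables.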
+
+
+class NetworkConnectionBase(ConnectionBase):
+ """
+ A base class for network-style connections.
+ """
+
+ force_persistence = True
+ # Do not use _remote_is_local in other connections
+ _remote_is_local = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(NetworkConnectionBase, self).__init__(play_context, new_stdin, *args, **kwargs)
+ self._messages = []
+ self._conn_closed = False
+
+ self._network_os = self._play_context.network_os
+
+ self._local = connection_loader.get('local', play_context, '/dev/null')
+ self._local.set_options()
+
+ self._sub_plugin = {}
+ self._cached_variables = (None, None, None)
+
+ # reconstruct the socket_path and set instance values accordingly
+ self._ansible_playbook_pid = kwargs.get('ansible_playbook_pid')
+ self._update_connection_state()
+
+ def __getattr__(self, name):
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ if not name.startswith('_'):
+ plugin = self._sub_plugin.get('obj')
+ if plugin:
+ method = getattr(plugin, name, None)
+ if method is not None:
+ return method
+ raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
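+
+ # Illustrative example (get_capabilities is an assumed sub-plugin method):
+ # conn.get_capabilities() is not defined on this class, so __getattr__
+ # above resolves it against self._sub_plugin['obj'] and returns the bound
+ # method.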
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ return self._local.exec_command(cmd, in_data, sudoable)
+
+ def queue_message(self, level, message):
+ """
+ Adds a message to the queue of messages waiting to be pushed back to the controller process.
+
+ :arg level: A string which can either be the name of a method in display, or 'log'. When
+ the messages are returned to task_executor, a value of log will correspond to
+ ``display.display(message, log_only=True)``, while any other value will call ``display.[level](message)``.
+ """
+ self._messages.append((level, message))
+
+ def pop_messages(self):
+ messages, self._messages = self._messages, []
+ return messages
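+
+ # Illustrative usage from a subclass: buffer controller-side output with
+ # self.queue_message('vvvv', 'negotiated session parameters')
+ # and task_executor later drains the buffer via pop_messages().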
+
+ def put_file(self, in_path, out_path):
+ """Transfer a file from local to remote"""
+ return self._local.put_file(in_path, out_path)
+
+ def fetch_file(self, in_path, out_path):
+ """Fetch a file from remote to local"""
+ return self._local.fetch_file(in_path, out_path)
+
+ def reset(self):
+ '''
+ Reset the connection
+ '''
+ if self._socket_path:
+ self.queue_message('vvvv', 'resetting persistent connection for socket_path %s' % self._socket_path)
+ self.close()
+ self.queue_message('vvvv', 'reset call on connection instance')
+
+ def close(self):
+ self._conn_closed = True
+ if self._connected:
+ self._connected = False
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(NetworkConnectionBase, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+ if self.get_option('persistent_log_messages'):
+ warning = "Persistent connection logging is enabled for %s. This will log ALL interactions" % self._play_context.remote_addr
+ logpath = getattr(C, 'DEFAULT_LOG_PATH')
+ if logpath is not None:
+ warning += " to %s" % logpath
+ self.queue_message('warning', "%s and WILL NOT redact sensitive configuration like passwords. USE WITH CAUTION!" % warning)
+
+ if self._sub_plugin.get('obj') and self._sub_plugin.get('type') != 'external':
+ try:
+ self._sub_plugin['obj'].set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+ except AttributeError:
+ pass
+
+ def _update_connection_state(self):
+ '''
+ Reconstruct the connection socket_path and check if it exists
+
+ If the socket path exists, the connection is active: set
+ _socket_path to the path and _connected to True. If the socket
+ path doesn't exist, leave _socket_path as None and _connected
+ as False.
+ '''
+ ssh = connection_loader.get('ssh', class_only=True)
+ control_path = ssh._create_control_path(
+ self._play_context.remote_addr, self._play_context.port,
+ self._play_context.remote_user, self._play_context.connection,
+ self._ansible_playbook_pid
+ )
+
+ tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
+ socket_path = unfrackpath(control_path % dict(directory=tmp_path))
+
+ if os.path.exists(socket_path):
+ self._connected = True
+ self._socket_path = socket_path
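+
+ # Illustrative result: with the default persistent control path directory
+ # the socket resolves to something like ~/.ansible/pc/<hash>, where <hash>
+ # is derived from the remote address, port, user, connection type and
+ # playbook pid.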
+
+ def _log_messages(self, message):
+ if self.get_option('persistent_log_messages'):
+ self.queue_message('log', message)
diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py
new file mode 100644
index 0000000..27afd10
--- /dev/null
+++ b/lib/ansible/plugins/connection/local.py
@@ -0,0 +1,194 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: local
+ short_description: execute on controller
+ description:
+ - This connection plugin allows ansible to execute tasks on the Ansible 'controller' instead of on a remote host.
+ author: ansible (@core)
+ version_added: historical
+ extends_documentation_fragment:
+ - connection_pipelining
+ notes:
+ - The remote user is ignored; the user with which the ansible CLI was executed is used instead.
+'''
+
+import fcntl
+import getpass
+import os
+import pty
+import shutil
+import subprocess
+
+import ansible.constants as C
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.compat import selectors
+from ansible.module_utils.six import text_type, binary_type
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+from ansible.utils.path import unfrackpath
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local based connections '''
+
+ transport = 'local'
+ has_pipelining = True
+
+ def __init__(self, *args, **kwargs):
+
+ super(Connection, self).__init__(*args, **kwargs)
+ self.cwd = None
+ try:
+ self.default_user = getpass.getuser()
+ except KeyError:
+ display.vv("Current user (uid=%s) does not seem to exist on this system, leaving user empty." % os.getuid())
+ self.default_user = ""
+
+ def _connect(self):
+ ''' connect to the local host; nothing to do here '''
+
+ # Because we haven't made any remote connection we're running as
+ # the local user, rather than as whatever is configured in remote_user.
+ self._play_context.remote_user = self.default_user
+
+ if not self._connected:
+ display.vvv(u"ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
+ self._connected = True
+ return self
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ ''' run a command on the local host '''
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ display.debug("in local.exec_command()")
+
+ executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
+
+ if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
+ raise AnsibleError("failed to find the executable specified %s."
+ " Please verify if the executable exists and re-try." % executable)
+
+ display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr)
+ display.debug("opening command with Popen()")
+
+ if isinstance(cmd, (text_type, binary_type)):
+ cmd = to_bytes(cmd)
+ else:
+ cmd = map(to_bytes, cmd)
+
+ master = None
+ stdin = subprocess.PIPE
+ if sudoable and self.become and self.become.expect_prompt() and not self.get_option('pipelining'):
+ # Create a pty if sudoable, for privilege escalation that needs it.
+ # Fall back to a standard pipe if this fails, which may cause the
+ # command to fail in situations where we are escalating privileges
+ # or the command otherwise needs a pty.
+ try:
+ master, stdin = pty.openpty()
+ except (IOError, OSError) as e:
+ display.debug("Unable to open pty: %s" % to_native(e))
+
+ p = subprocess.Popen(
+ cmd,
+ shell=isinstance(cmd, (text_type, binary_type)),
+ executable=executable,
+ cwd=self.cwd,
+ stdin=stdin,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+
+ # if we opened a pty, the child inherited the slave end (stdin); close our copy of it now
+ if master is not None:
+ os.close(stdin)
+
+ display.debug("done running command with Popen()")
+
+ if self.become and self.become.expect_prompt() and sudoable:
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)
+
+ become_output = b''
+ try:
+ while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
+ events = selector.select(self._play_context.timeout)
+ if not events:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
+
+ for key, event in events:
+ if key.fileobj == p.stdout:
+ chunk = p.stdout.read()
+ elif key.fileobj == p.stderr:
+ chunk = p.stderr.read()
+
+ if not chunk:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
+ become_output += chunk
+ finally:
+ selector.close()
+
+ if not self.become.check_success(become_output):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ if master is None:
+ p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ else:
+ os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+ display.debug("getting output with communicate()")
+ stdout, stderr = p.communicate(in_data)
+ display.debug("done communicating")
+
+ # finally, close the other half of the pty, if it was created
+ if master:
+ os.close(master)
+
+ display.debug("done with local.exec_command()")
+ return (p.returncode, stdout, stderr)
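+
+ # Illustrative example: exec_command(b'echo hello') runs the command
+ # through the configured shell executable and returns roughly
+ # (0, b'hello\n', b'').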
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to local '''
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ in_path = unfrackpath(in_path, basedir=self.cwd)
+ out_path = unfrackpath(out_path, basedir=self.cwd)
+
+ display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
+ try:
+ shutil.copyfile(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
+ except shutil.Error:
+ raise AnsibleError("failed to copy: {0} and {1} are the same".format(to_native(in_path), to_native(out_path)))
+ except IOError as e:
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e)))
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from local to local -- for compatibility '''
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
+ self.put_file(in_path, out_path)
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ self._connected = False
diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py
new file mode 100644
index 0000000..b9fd898
--- /dev/null
+++ b/lib/ansible/plugins/connection/paramiko_ssh.py
@@ -0,0 +1,695 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ author: Ansible Core Team
+ name: paramiko
+ short_description: Run tasks via python ssh (paramiko)
+ description:
+ - Use the python ssh implementation (Paramiko) to connect to targets
+ - The paramiko transport is provided because many distributions, in particular EL6 and before, do not support ControlPersist
+ in their SSH implementations.
+ - This is needed on the Ansible control machine to be reasonably efficient with connections.
+ Thus paramiko is faster for most users on these platforms.
+ Users with ControlPersist capability can consider using -c ssh or configuring the transport in the configuration file.
+ - This plugin also borrows a lot of settings from the ssh plugin as they both cover the same protocol.
+ version_added: "0.1"
+ options:
+ remote_addr:
+ description:
+ - Address of the remote target
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_ssh_host
+ - name: ansible_paramiko_host
+ remote_user:
+ description:
+ - User to login/authenticate as
+ - Can be set from the CLI via the C(--user) or C(-u) options.
+ vars:
+ - name: ansible_user
+ - name: ansible_ssh_user
+ - name: ansible_paramiko_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ - name: ANSIBLE_PARAMIKO_REMOTE_USER
+ version_added: '2.5'
+ ini:
+ - section: defaults
+ key: remote_user
+ - section: paramiko_connection
+ key: remote_user
+ version_added: '2.5'
+ keyword:
+ - name: remote_user
+ password:
+ description:
+ - Secret used either to log in to the ssh server or as a passphrase for ssh keys that require it
+ - Can be set from the CLI via the C(--ask-pass) option.
+ vars:
+ - name: ansible_password
+ - name: ansible_ssh_pass
+ - name: ansible_ssh_password
+ - name: ansible_paramiko_pass
+ - name: ansible_paramiko_password
+ version_added: '2.5'
+ use_rsa_sha2_algorithms:
+ description:
+ - Whether or not to enable RSA SHA2 algorithms for pubkeys and hostkeys
+ - On paramiko versions older than 2.9, this only affects hostkeys
+ - For behavior matching paramiko<2.9 set this to C(False)
+ vars:
+ - name: ansible_paramiko_use_rsa_sha2_algorithms
+ ini:
+ - {key: use_rsa_sha2_algorithms, section: paramiko_connection}
+ env:
+ - {name: ANSIBLE_PARAMIKO_USE_RSA_SHA2_ALGORITHMS}
+ default: True
+ type: boolean
+ version_added: '2.14'
+ host_key_auto_add:
+ description: 'Automatically add host keys'
+ env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}]
+ ini:
+ - {key: host_key_auto_add, section: paramiko_connection}
+ type: boolean
+ look_for_keys:
+ default: True
+ description: 'False to disable searching for private key files in ~/.ssh/'
+ env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}]
+ ini:
+ - {key: look_for_keys, section: paramiko_connection}
+ type: boolean
+ proxy_command:
+ default: ''
+ description:
+ - Proxy information for running the connection via a jumphost
+ - Also, this plugin will scan 'ssh_args', 'ssh_extra_args' and 'ssh_common_args' from the 'ssh' plugin settings for proxy information if set.
+ env: [{name: ANSIBLE_PARAMIKO_PROXY_COMMAND}]
+ ini:
+ - {key: proxy_command, section: paramiko_connection}
+ ssh_args:
+ description: Only used in parsing ProxyCommand for use in this plugin.
+ default: ''
+ ini:
+ - section: 'ssh_connection'
+ key: 'ssh_args'
+ env:
+ - name: ANSIBLE_SSH_ARGS
+ vars:
+ - name: ansible_ssh_args
+ version_added: '2.7'
+ ssh_common_args:
+ description: Only used in parsing ProxyCommand for use in this plugin.
+ ini:
+ - section: 'ssh_connection'
+ key: 'ssh_common_args'
+ version_added: '2.7'
+ env:
+ - name: ANSIBLE_SSH_COMMON_ARGS
+ version_added: '2.7'
+ vars:
+ - name: ansible_ssh_common_args
+ cli:
+ - name: ssh_common_args
+ default: ''
+ ssh_extra_args:
+ description: Only used in parsing ProxyCommand for use in this plugin.
+ vars:
+ - name: ansible_ssh_extra_args
+ env:
+ - name: ANSIBLE_SSH_EXTRA_ARGS
+ version_added: '2.7'
+ ini:
+ - key: ssh_extra_args
+ section: ssh_connection
+ version_added: '2.7'
+ cli:
+ - name: ssh_extra_args
+ default: ''
+ pty:
+ default: True
+ description: 'SUDO usually requires a PTY; True to give a PTY and False to not give a PTY.'
+ env:
+ - name: ANSIBLE_PARAMIKO_PTY
+ ini:
+ - section: paramiko_connection
+ key: pty
+ type: boolean
+ record_host_keys:
+ default: True
+ description: 'Save the host keys to a file'
+ env: [{name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS}]
+ ini:
+ - section: paramiko_connection
+ key: record_host_keys
+ type: boolean
+ host_key_checking:
+ description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
+ type: boolean
+ default: True
+ env:
+ - name: ANSIBLE_HOST_KEY_CHECKING
+ - name: ANSIBLE_SSH_HOST_KEY_CHECKING
+ version_added: '2.5'
+ - name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING
+ version_added: '2.5'
+ ini:
+ - section: defaults
+ key: host_key_checking
+ - section: paramiko_connection
+ key: host_key_checking
+ version_added: '2.5'
+ vars:
+ - name: ansible_host_key_checking
+ version_added: '2.5'
+ - name: ansible_ssh_host_key_checking
+ version_added: '2.5'
+ - name: ansible_paramiko_host_key_checking
+ version_added: '2.5'
+ use_persistent_connections:
+ description: 'Toggles the use of persistence for connections'
+ type: boolean
+ default: False
+ env:
+ - name: ANSIBLE_USE_PERSISTENT_CONNECTIONS
+ ini:
+ - section: defaults
+ key: use_persistent_connections
+ banner_timeout:
+ type: float
+ default: 30
+ version_added: '2.14'
+ description:
+ - Configures, in seconds, the amount of time to wait for the SSH
+ banner to be presented. This option is supported by paramiko
+ version 1.15.0 or newer.
+ ini:
+ - section: paramiko_connection
+ key: banner_timeout
+ env:
+ - name: ANSIBLE_PARAMIKO_BANNER_TIMEOUT
+# TODO:
+#timeout=self._play_context.timeout,
+"""
+
+import os
+import socket
+import tempfile
+import traceback
+import fcntl
+import sys
+import re
+
+from termios import tcflush, TCIFLUSH
+from ansible.module_utils.compat.version import LooseVersion
+from binascii import hexlify
+
+from ansible.errors import (
+ AnsibleAuthenticationFailure,
+ AnsibleConnectionFailure,
+ AnsibleError,
+ AnsibleFileNotFound,
+)
+from ansible.module_utils.compat.paramiko import PARAMIKO_IMPORT_ERR, paramiko
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+from ansible.utils.path import makedirs_safe
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+display = Display()
+
+
+AUTHENTICITY_MSG = """
+paramiko: The authenticity of host '%s' can't be established.
+The %s key fingerprint is %s.
+Are you sure you want to continue connecting (yes/no)?
+"""
+
+# SSH Options Regex
+SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')
+
+
+class MyAddPolicy(object):
+ """
+ Based on AutoAddPolicy in paramiko so we can determine when keys are added
+
+ and also prompt for input.
+
+ Policy for automatically adding the hostname and new host key to the
+ local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
+ """
+
+ def __init__(self, new_stdin, connection):
+ self._new_stdin = new_stdin
+ self.connection = connection
+ self._options = connection._options
+
+ def missing_host_key(self, client, hostname, key):
+
+ if all((self._options['host_key_checking'], not self._options['host_key_auto_add'])):
+
+ fingerprint = hexlify(key.get_fingerprint())
+ ktype = key.get_name()
+
+ if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence:
+ # don't print the prompt string since the user cannot respond
+ # to the question anyway
+ raise AnsibleError(AUTHENTICITY_MSG[1:92] % (hostname, ktype, fingerprint))
+
+ self.connection.connection_lock()
+
+ old_stdin = sys.stdin
+ sys.stdin = self._new_stdin
+
+ # clear out any premature input on sys.stdin
+ tcflush(sys.stdin, TCIFLUSH)
+
+ inp = input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
+ sys.stdin = old_stdin
+
+ self.connection.connection_unlock()
+
+ if inp not in ['yes', 'y', '']:
+ raise AnsibleError("host connection rejected by user")
+
+ key._added_by_ansible_this_time = True
+
+ # existing implementation below:
+ client._host_keys.add(hostname, key.get_name(), key)
+
+ # host keys are actually saved in close() function below
+ # in order to control ordering.
+
+
+# keep connection objects on a per host basis to avoid repeated attempts to reconnect
+
+SSH_CONNECTION_CACHE = {} # type: dict[str, paramiko.client.SSHClient]
+SFTP_CONNECTION_CACHE = {} # type: dict[str, paramiko.sftp_client.SFTPClient]
+
+
+class Connection(ConnectionBase):
+ ''' SSH based connections with Paramiko '''
+
+ transport = 'paramiko'
+ _log_channel = None
+
+ def _cache_key(self):
+ return "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
+
+ def _connect(self):
+ cache_key = self._cache_key()
+ if cache_key in SSH_CONNECTION_CACHE:
+ self.ssh = SSH_CONNECTION_CACHE[cache_key]
+ else:
+ self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
+
+ self._connected = True
+ return self
+
+ def _set_log_channel(self, name):
+ '''Mimic paramiko.SSHClient.set_log_channel'''
+ self._log_channel = name
+
+ def _parse_proxy_command(self, port=22):
+ proxy_command = None
+ # Parse ansible_ssh_common_args, specifically looking for ProxyCommand
+ ssh_args = [
+ self.get_option('ssh_extra_args'),
+ self.get_option('ssh_common_args'),
+ self.get_option('ssh_args', ''),
+ ]
+
+ args = self._split_ssh_args(' '.join(ssh_args))
+ for i, arg in enumerate(args):
+ if arg.lower() == 'proxycommand':
+ # _split_ssh_args has split 'ProxyCommand' from its value
+ proxy_command = args[i + 1]
+ else:
+ # 'ProxyCommand' and its value are in a single string
+ match = SETTINGS_REGEX.match(arg)
+ if match:
+ if match.group(1).lower() == 'proxycommand':
+ proxy_command = match.group(2)
+
+ if proxy_command:
+ break
+
+ proxy_command = self.get_option('proxy_command') or proxy_command
+
+ sock_kwarg = {}
+ if proxy_command:
+ replacers = {
+ '%h': self._play_context.remote_addr,
+ '%p': port,
+ '%r': self._play_context.remote_user
+ }
+ for find, replace in replacers.items():
+ proxy_command = proxy_command.replace(find, str(replace))
+ try:
+ sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)}
+ display.vvv("CONFIGURE PROXY COMMAND FOR CONNECTION: %s" % proxy_command, host=self._play_context.remote_addr)
+ except AttributeError:
+ display.warning('Paramiko ProxyCommand support unavailable. '
+ 'Please upgrade to Paramiko 1.9.0 or newer. '
+ 'Not using configured ProxyCommand')
+
+ return sock_kwarg
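+
+ # Illustrative example: with ansible_ssh_common_args set to
+ # -o ProxyCommand="ssh -W %h:%p jumphost"
+ # the loop above extracts the ProxyCommand value, the %h/%p/%r placeholders
+ # are substituted, and {'sock': paramiko.ProxyCommand('ssh -W target:22 jumphost')}
+ # is returned ('target' and 22 standing in for the real address and port).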
+
+ def _connect_uncached(self):
+ ''' activates the connection object '''
+
+ if paramiko is None:
+ raise AnsibleError("paramiko is not installed: %s" % to_native(PARAMIKO_IMPORT_ERR))
+
+ port = self._play_context.port or 22
+ display.vvv("ESTABLISH PARAMIKO SSH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr),
+ host=self._play_context.remote_addr)
+
+ ssh = paramiko.SSHClient()
+
+ # Decide which pubkey and hostkey algorithms to disable; the only manipulation
+ # currently allowed is keeping or omitting the rsa-sha2 algorithms
+ paramiko_preferred_pubkeys = getattr(paramiko.Transport, '_preferred_pubkeys', ())
+ paramiko_preferred_hostkeys = getattr(paramiko.Transport, '_preferred_keys', ())
+ use_rsa_sha2_algorithms = self.get_option('use_rsa_sha2_algorithms')
+ disabled_algorithms = {}
+ if not use_rsa_sha2_algorithms:
+ if paramiko_preferred_pubkeys:
+ disabled_algorithms['pubkeys'] = tuple(a for a in paramiko_preferred_pubkeys if 'rsa-sha2' in a)
+ if paramiko_preferred_hostkeys:
+ disabled_algorithms['keys'] = tuple(a for a in paramiko_preferred_hostkeys if 'rsa-sha2' in a)
+
+ # override paramiko's default logger name
+ if self._log_channel is not None:
+ ssh.set_log_channel(self._log_channel)
+
+ self.keyfile = os.path.expanduser("~/.ssh/known_hosts")
+
+ if self.get_option('host_key_checking'):
+ for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts", "/etc/openssh/ssh_known_hosts"):
+ try:
+ # TODO: check if we need to look at several possible locations, possible for loop
+ ssh.load_system_host_keys(ssh_known_hosts)
+ break
+ except IOError:
+ pass # file was not found, but not required to function
+ ssh.load_system_host_keys()
+
+ ssh_connect_kwargs = self._parse_proxy_command(port)
+
+ ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self))
+
+ conn_password = self.get_option('password') or self._play_context.password
+
+ allow_agent = True
+
+ if conn_password is not None:
+ allow_agent = False
+
+ try:
+ key_filename = None
+ if self._play_context.private_key_file:
+ key_filename = os.path.expanduser(self._play_context.private_key_file)
+
+ # paramiko 2.2 introduced auth_timeout parameter
+ if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'):
+ ssh_connect_kwargs['auth_timeout'] = self._play_context.timeout
+
+ # paramiko 1.15 introduced banner timeout parameter
+ if LooseVersion(paramiko.__version__) >= LooseVersion('1.15.0'):
+ ssh_connect_kwargs['banner_timeout'] = self.get_option('banner_timeout')
+
+ ssh.connect(
+ self._play_context.remote_addr.lower(),
+ username=self._play_context.remote_user,
+ allow_agent=allow_agent,
+ look_for_keys=self.get_option('look_for_keys'),
+ key_filename=key_filename,
+ password=conn_password,
+ timeout=self._play_context.timeout,
+ port=port,
+ disabled_algorithms=disabled_algorithms,
+ **ssh_connect_kwargs,
+ )
+ except paramiko.ssh_exception.BadHostKeyException as e:
+ raise AnsibleConnectionFailure('host key mismatch for %s' % e.hostname)
+ except paramiko.ssh_exception.AuthenticationException as e:
+ msg = 'Failed to authenticate: {0}'.format(to_text(e))
+ raise AnsibleAuthenticationFailure(msg)
+ except Exception as e:
+ msg = to_text(e)
+ if u"PID check failed" in msg:
+ raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
+ elif u"Private key file is encrypted" in msg:
+ msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
+ self._play_context.remote_user, self._play_context.remote_addr, port, msg)
+ raise AnsibleConnectionFailure(msg)
+ else:
+ raise AnsibleConnectionFailure(msg)
+
+ return ssh
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ ''' run a command on the remote host '''
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ if in_data:
+ raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
+
+ bufsize = 4096
+
+ try:
+ self.ssh.get_transport().set_keepalive(5)
+ chan = self.ssh.get_transport().open_session()
+ except Exception as e:
+ text_e = to_text(e)
+ msg = u"Failed to open session"
+ if text_e:
+ msg += u": %s" % text_e
+ raise AnsibleConnectionFailure(to_native(msg))
+
+ # sudo usually requires a PTY (cf. requiretty option), therefore
+ # we give it one by default (pty=True in ansible.cfg), and we try
+ # to initialise from the calling environment when sudoable is enabled
+ if self.get_option('pty') and sudoable:
+ chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0)))
+
+ display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr)
+
+ cmd = to_bytes(cmd, errors='surrogate_or_strict')
+
+ no_prompt_out = b''
+ no_prompt_err = b''
+ become_output = b''
+
+ try:
+ chan.exec_command(cmd)
+ if self.become and self.become.expect_prompt():
+ passprompt = False
+ become_success = False
+ while not (become_success or passprompt):
+ display.debug('Waiting for Privilege Escalation input')
+
+ chunk = chan.recv(bufsize)
+ display.debug("chunk is: %s" % chunk)
+ if not chunk:
+ if b'unknown user' in become_output:
+ n_become_user = to_native(self.become.get_option('become_user',
+ playcontext=self._play_context))
+ raise AnsibleError('user %s does not exist' % n_become_user)
+ else:
+ break
+ # raise AnsibleError('ssh connection closed waiting for password prompt')
+ become_output += chunk
+
+ # need to check every line because we might get lectured
+ # and we might get the middle of a line in a chunk
+ for l in become_output.splitlines(True):
+ if self.become.check_success(l):
+ become_success = True
+ break
+ elif self.become.check_password_prompt(l):
+ passprompt = True
+ break
+
+ if passprompt:
+ if self.become:
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ chan.sendall(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ else:
+ raise AnsibleError("A password is required but none was supplied")
+ else:
+ no_prompt_out += become_output
+ no_prompt_err += become_output
+ except socket.timeout:
+ raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + to_native(become_output))
+
+ stdout = b''.join(chan.makefile('rb', bufsize))
+ stderr = b''.join(chan.makefile_stderr('rb', bufsize))
+
+ return (chan.recv_exit_status(), no_prompt_out + stdout, no_prompt_err + stderr)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to remote '''
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
+
+ try:
+ self.sftp = self.ssh.open_sftp()
+ except Exception as e:
+ raise AnsibleError("failed to open a SFTP connection (%s)" % e)
+
+ try:
+ self.sftp.put(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
+ except IOError:
+ raise AnsibleError("failed to transfer file to %s" % out_path)
+
+ def _connect_sftp(self):
+
+ cache_key = "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
+ if cache_key in SFTP_CONNECTION_CACHE:
+ return SFTP_CONNECTION_CACHE[cache_key]
+ else:
+ result = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp()
+ return result
+
+ def fetch_file(self, in_path, out_path):
+ ''' save a remote file to the specified path '''
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ try:
+ self.sftp = self._connect_sftp()
+ except Exception as e:
+ raise AnsibleError("failed to open a SFTP connection (%s)" % to_native(e))
+
+ try:
+ self.sftp.get(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
+ except IOError:
+ raise AnsibleError("failed to transfer file from %s" % in_path)
+
+ def _any_keys_added(self):
+
+ for hostname, keys in self.ssh._host_keys.items():
+ for keytype, key in keys.items():
+ added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+ if added_this_time:
+ return True
+ return False
+
+ def _save_ssh_host_keys(self, filename):
+ '''
+ not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
+ don't complain about it :)
+ '''
+
+ if not self._any_keys_added():
+ return False
+
+ path = os.path.expanduser("~/.ssh")
+ makedirs_safe(path)
+
+ with open(filename, 'w') as f:
+
+ for hostname, keys in self.ssh._host_keys.items():
+
+ for keytype, key in keys.items():
+
+ # was f.write
+ added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+ if not added_this_time:
+ f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
+
+ for hostname, keys in self.ssh._host_keys.items():
+
+ for keytype, key in keys.items():
+ added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+ if added_this_time:
+ f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
+
+ def reset(self):
+ if not self._connected:
+ return
+ self.close()
+ self._connect()
+
+ def close(self):
+ ''' terminate the connection '''
+
+ cache_key = self._cache_key()
+ SSH_CONNECTION_CACHE.pop(cache_key, None)
+ SFTP_CONNECTION_CACHE.pop(cache_key, None)
+
+ if hasattr(self, 'sftp'):
+ if self.sftp is not None:
+ self.sftp.close()
+
+ if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added():
+
+ # add any new SSH host keys -- warning -- this could be slow
+ # (This doesn't acquire the connection lock because it needs
+ # to exclude only other known_hosts writers, not connections
+ # that are starting up.)
+ lockfile = self.keyfile.replace("known_hosts", ".known_hosts.lock")
+ dirname = os.path.dirname(self.keyfile)
+ makedirs_safe(dirname)
+
+ KEY_LOCK = open(lockfile, 'w')
+ fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)
+
+ try:
+ # just in case any were added recently
+
+ self.ssh.load_system_host_keys()
+ self.ssh._host_keys.update(self.ssh._system_host_keys)
+
+ # gather information about the current key file, so
+ # we can ensure the new file has the correct mode/owner
+
+ key_dir = os.path.dirname(self.keyfile)
+ if os.path.exists(self.keyfile):
+ key_stat = os.stat(self.keyfile)
+ mode = key_stat.st_mode
+ uid = key_stat.st_uid
+ gid = key_stat.st_gid
+ else:
+ mode = 33188
+ uid = os.getuid()
+ gid = os.getgid()
+
+ # Save the new keys to a temporary file and move it into place
+ # rather than rewriting the file. We set delete=False because
+ # the file will be moved into place rather than cleaned up.
+
+ tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
+ os.chmod(tmp_keyfile.name, mode & 0o7777)
+ os.chown(tmp_keyfile.name, uid, gid)
+
+ self._save_ssh_host_keys(tmp_keyfile.name)
+ tmp_keyfile.close()
+
+ os.rename(tmp_keyfile.name, self.keyfile)
+
+ except Exception:
+
+ # unable to save keys, including scenario when key was invalid
+ # and caught earlier
+ traceback.print_exc()
+ fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)
+
+ self.ssh.close()
+ self._connected = False
diff --git a/lib/ansible/plugins/connection/psrp.py b/lib/ansible/plugins/connection/psrp.py
new file mode 100644
index 0000000..dfcf0e5
--- /dev/null
+++ b/lib/ansible/plugins/connection/psrp.py
@@ -0,0 +1,898 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+author: Ansible Core Team
+name: psrp
+short_description: Run tasks over Microsoft PowerShell Remoting Protocol
+description:
+- Run commands or put/fetch on a target via PSRP (WinRM plugin)
+- This is similar to the I(winrm) connection plugin which uses the same
+ underlying transport but instead runs in a PowerShell interpreter.
+version_added: "2.7"
+requirements:
+- pypsrp>=0.4.0 (Python library)
+extends_documentation_fragment:
+ - connection_pipelining
+options:
+ # transport options
+ remote_addr:
+ description:
+ - The hostname or IP address of the remote host.
+ default: inventory_hostname
+ type: str
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_psrp_host
+ remote_user:
+ description:
+ - The user to log in as.
+ type: str
+ vars:
+ - name: ansible_user
+ - name: ansible_psrp_user
+ keyword:
+ - name: remote_user
+ remote_password:
+ description: Authentication password for the C(remote_user). Can be supplied as CLI option.
+ type: str
+ vars:
+ - name: ansible_password
+ - name: ansible_winrm_pass
+ - name: ansible_winrm_password
+ aliases:
+ - password # Needed for --ask-pass to come through on delegation
+ port:
+ description:
+ - The port for PSRP to connect on the remote target.
+ - Default is C(5986) if I(protocol) is not defined or is C(https),
+ otherwise the port is C(5985).
+ type: int
+ vars:
+ - name: ansible_port
+ - name: ansible_psrp_port
+ keyword:
+ - name: port
+ protocol:
+ description:
+ - Set the protocol to use for the connection.
+ - Default is C(https) if I(port) is not defined or I(port) is not C(5985).
+ choices:
+ - http
+ - https
+ type: str
+ vars:
+ - name: ansible_psrp_protocol
+ path:
+ description:
+ - The URI path to connect to.
+ type: str
+ vars:
+ - name: ansible_psrp_path
+ default: 'wsman'
+ auth:
+ description:
+ - The authentication protocol to use when authenticating the remote user.
+ - The default, C(negotiate), will attempt to use C(Kerberos) if it is
+ available and fall back to C(NTLM) if it isn't.
+ type: str
+ vars:
+ - name: ansible_psrp_auth
+ choices:
+ - basic
+ - certificate
+ - negotiate
+ - kerberos
+ - ntlm
+ - credssp
+ default: negotiate
+ cert_validation:
+ description:
+ - Whether to validate the remote server's certificate or not.
+ - Set to C(ignore) to not validate any certificates.
+ - I(ca_cert) can be set to the path of a PEM certificate chain to
+ use in the validation.
+ choices:
+ - validate
+ - ignore
+ default: validate
+ type: str
+ vars:
+ - name: ansible_psrp_cert_validation
+ ca_cert:
+ description:
+ - The path to a PEM certificate chain to use when validating the server's
+ certificate.
+ - This value is ignored if I(cert_validation) is set to C(ignore).
+ type: path
+ vars:
+ - name: ansible_psrp_cert_trust_path
+ - name: ansible_psrp_ca_cert
+ aliases: [ cert_trust_path ]
+ connection_timeout:
+ description:
+ - The connection timeout for making the request to the remote host.
+ - This is measured in seconds.
+ type: int
+ vars:
+ - name: ansible_psrp_connection_timeout
+ default: 30
+ read_timeout:
+ description:
+ - The read timeout for receiving data from the remote host.
+ - This value must always be greater than I(operation_timeout).
+ - This option requires pypsrp >= 0.3.
+ - This is measured in seconds.
+ type: int
+ vars:
+ - name: ansible_psrp_read_timeout
+ default: 30
+ version_added: '2.8'
+ reconnection_retries:
+ description:
+ - The number of retries on connection errors.
+ type: int
+ vars:
+ - name: ansible_psrp_reconnection_retries
+ default: 0
+ version_added: '2.8'
+ reconnection_backoff:
+ description:
+ - The backoff time to use in between reconnection attempts.
+ (First sleeps X, then sleeps 2*X, then sleeps 4*X, ...)
+ - This is measured in seconds.
+ - The C(ansible_psrp_reconnection_backoff) variable was added in Ansible
+ 2.9.
+ type: int
+ vars:
+ - name: ansible_psrp_connection_backoff
+ - name: ansible_psrp_reconnection_backoff
+ default: 2
+ version_added: '2.8'
+ message_encryption:
+ description:
+ - Controls the message encryption settings; this is different from TLS
+ encryption when I(ansible_psrp_protocol) is C(https).
+ - Only the auth protocols C(negotiate), C(kerberos), C(ntlm), and
+ C(credssp) can do message encryption. The other authentication protocols
+ only support encryption when C(protocol) is set to C(https).
+ - C(auto) means message encryption is only used when not using
+ TLS/HTTPS.
+ - C(always) is the same as C(auto) but message encryption is always used
+ even when running over TLS/HTTPS.
+ - C(never) disables any encryption checks that are in place when running
+ over HTTP and disables any authentication encryption processes.
+ type: str
+ vars:
+ - name: ansible_psrp_message_encryption
+ choices:
+ - auto
+ - always
+ - never
+ default: auto
+ proxy:
+ description:
+ - Set the proxy URL to use when connecting to the remote host.
+ vars:
+ - name: ansible_psrp_proxy
+ type: str
+ ignore_proxy:
+ description:
+ - Will disable any environment proxy settings and connect directly to the
+ remote host.
+ - This option is ignored if C(proxy) is set.
+ vars:
+ - name: ansible_psrp_ignore_proxy
+ type: bool
+ default: 'no'
+
+ # auth options
+ certificate_key_pem:
+ description:
+ - The local path to an X509 certificate key to use with certificate auth.
+ type: path
+ vars:
+ - name: ansible_psrp_certificate_key_pem
+ certificate_pem:
+ description:
+ - The local path to an X509 certificate to use with certificate auth.
+ type: path
+ vars:
+ - name: ansible_psrp_certificate_pem
+ credssp_auth_mechanism:
+ description:
+ - The sub authentication mechanism to use with CredSSP auth.
+ - When C(auto), both Kerberos and NTLM are attempted, with Kerberos being
+ preferred.
+ type: str
+ choices:
+ - auto
+ - kerberos
+ - ntlm
+ default: auto
+ vars:
+ - name: ansible_psrp_credssp_auth_mechanism
+ credssp_disable_tlsv1_2:
+ description:
+ - Disables the use of TLSv1.2 on the CredSSP authentication channel.
+ - This should not be set to C(yes) unless dealing with a host that does not
+ have TLSv1.2.
+ default: no
+ type: bool
+ vars:
+ - name: ansible_psrp_credssp_disable_tlsv1_2
+ credssp_minimum_version:
+ description:
+ - The minimum CredSSP server authentication version that will be accepted.
+ - Set to C(5) to ensure the server has been patched and is not vulnerable
+ to CVE-2018-0886.
+ default: 2
+ type: int
+ vars:
+ - name: ansible_psrp_credssp_minimum_version
+ negotiate_delegate:
+ description:
+ - Allow the remote user the ability to delegate its credentials to another
+ server, i.e. credential delegation.
+ - Only valid when Kerberos was the negotiated auth or was explicitly set as
+ the authentication.
+ - Ignored when NTLM was the negotiated auth.
+ type: bool
+ vars:
+ - name: ansible_psrp_negotiate_delegate
+ negotiate_hostname_override:
+ description:
+ - Override the remote hostname when searching for the host in the Kerberos
+ lookup.
+ - This allows Ansible to connect over IP but authenticate with the remote
+ server using its DNS name.
+ - Only valid when Kerberos was the negotiated auth or was explicitly set as
+ the authentication.
+ - Ignored when NTLM was the negotiated auth.
+ type: str
+ vars:
+ - name: ansible_psrp_negotiate_hostname_override
+ negotiate_send_cbt:
+ description:
+ - Send the Channel Binding Token (CBT) structure when authenticating.
+ - CBT is used to provide extra protection against Man in the Middle C(MitM)
+ attacks by binding the outer transport channel to the auth channel.
+ - CBT is not used when using just C(HTTP), only C(HTTPS).
+ default: yes
+ type: bool
+ vars:
+ - name: ansible_psrp_negotiate_send_cbt
+ negotiate_service:
+ description:
+ - Override the service part of the SPN used during Kerberos authentication.
+ - Only valid when Kerberos was the negotiated auth or was explicitly set as
+ the authentication.
+ - Ignored when NTLM was the negotiated auth.
+ default: WSMAN
+ type: str
+ vars:
+ - name: ansible_psrp_negotiate_service
+
+ # protocol options
+ operation_timeout:
+ description:
+ - Sets the WSMan timeout for each operation.
+ - This is measured in seconds.
+ - This should not exceed the value for C(connection_timeout).
+ type: int
+ vars:
+ - name: ansible_psrp_operation_timeout
+ default: 20
+ max_envelope_size:
+ description:
+ - Sets the maximum size of each WSMan message sent to the remote host.
+ - This is measured in bytes.
+ - Defaults to C(150KiB) for compatibility with older hosts.
+ type: int
+ vars:
+ - name: ansible_psrp_max_envelope_size
+ default: 153600
+ configuration_name:
+ description:
+ - The name of the PowerShell configuration endpoint to connect to.
+ type: str
+ vars:
+ - name: ansible_psrp_configuration_name
+ default: Microsoft.PowerShell
+"""
+
+import base64
+import json
+import logging
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleConnectionFailure, AnsibleError
+from ansible.errors import AnsibleFileNotFound
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase
+from ansible.plugins.shell.powershell import _common_args
+from ansible.utils.display import Display
+from ansible.utils.hashing import sha1
+
+HAS_PYPSRP = True
+PYPSRP_IMP_ERR = None
+try:
+ import pypsrp
+ from pypsrp.complex_objects import GenericComplexObject, PSInvocationState, RunspacePoolState
+ from pypsrp.exceptions import AuthenticationError, WinRMError
+ from pypsrp.host import PSHost, PSHostUserInterface
+ from pypsrp.powershell import PowerShell, RunspacePool
+ from pypsrp.wsman import WSMan, AUTH_KWARGS
+ from requests.exceptions import ConnectionError, ConnectTimeout
+except ImportError as err:
+ HAS_PYPSRP = False
+ PYPSRP_IMP_ERR = err
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+
+ transport = 'psrp'
+ module_implementation_preferences = ('.ps1', '.exe', '')
+ allow_executable = False
+ has_pipelining = True
+ allow_extras = True
+
+ def __init__(self, *args, **kwargs):
+ self.always_pipeline_modules = True
+ self.has_native_async = True
+
+ self.runspace = None
+ self.host = None
+ self._last_pipeline = False
+
+ self._shell_type = 'powershell'
+ super(Connection, self).__init__(*args, **kwargs)
+
+ if not C.DEFAULT_DEBUG:
+ logging.getLogger('pypsrp').setLevel(logging.WARNING)
+ logging.getLogger('requests_credssp').setLevel(logging.INFO)
+ logging.getLogger('urllib3').setLevel(logging.INFO)
+
+ def _connect(self):
+ if not HAS_PYPSRP:
+ raise AnsibleError("pypsrp or dependencies are not installed: %s"
+ % to_native(PYPSRP_IMP_ERR))
+ super(Connection, self)._connect()
+ self._build_kwargs()
+ display.vvv("ESTABLISH PSRP CONNECTION FOR USER: %s ON PORT %s TO %s" %
+ (self._psrp_user, self._psrp_port, self._psrp_host),
+ host=self._psrp_host)
+
+ if not self.runspace:
+ connection = WSMan(**self._psrp_conn_kwargs)
+
+ # create our pseudo host to capture the exit code and host output
+ host_ui = PSHostUserInterface()
+ self.host = PSHost(None, None, False, "Ansible PSRP Host", None,
+ host_ui, None)
+
+ self.runspace = RunspacePool(
+ connection, host=self.host,
+ configuration_name=self._psrp_configuration_name
+ )
+ display.vvvvv(
+ "PSRP OPEN RUNSPACE: auth=%s configuration=%s endpoint=%s" %
+ (self._psrp_auth, self._psrp_configuration_name,
+ connection.transport.endpoint), host=self._psrp_host
+ )
+ try:
+ self.runspace.open()
+ except AuthenticationError as e:
+ raise AnsibleConnectionFailure("failed to authenticate with "
+ "the server: %s" % to_native(e))
+ except WinRMError as e:
+ raise AnsibleConnectionFailure(
+ "psrp connection failure during runspace open: %s"
+ % to_native(e)
+ )
+ except (ConnectionError, ConnectTimeout) as e:
+ raise AnsibleConnectionFailure(
+ "Failed to connect to the host via PSRP: %s"
+ % to_native(e)
+ )
+
+ self._connected = True
+ self._last_pipeline = None
+ return self
+
+ def reset(self):
+ if not self._connected:
+ self.runspace = None
+ return
+
+ # Try our best to ensure the runspace is closed to free up server-side resources
+ try:
+ self.close()
+ except Exception as e:
+ # There's a good chance the connection was already closed so just log the error and move on
+ display.debug("PSRP reset - failed to closed runspace: %s" % to_text(e))
+
+ display.vvvvv("PSRP: Reset Connection", host=self._psrp_host)
+ self.runspace = None
+ self._connect()
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ super(Connection, self).exec_command(cmd, in_data=in_data,
+ sudoable=sudoable)
+
+ if cmd.startswith(" ".join(_common_args) + " -EncodedCommand"):
+ # This is a PowerShell script encoded by the shell plugin, we will
+ # decode the script and execute it in the runspace instead of
+ # starting a new interpreter to save on time
+ b_command = base64.b64decode(cmd.split(" ")[-1])
+ script = to_text(b_command, 'utf-16-le')
+ in_data = to_text(in_data, errors="surrogate_or_strict", nonstring="passthru")
+
+ if in_data and in_data.startswith(u"#!"):
+ # ANSIBALLZ wrapper; we need to get the interpreter and execute
+ # that as the script. Note this won't work as basic.py relies
+ # on packages not available on Windows; once fixed we can enable
+ # this path
+ interpreter = to_native(in_data.splitlines()[0][2:])
+ # script = "$input | &'%s' -" % interpreter
+ # in_data = to_text(in_data)
+ raise AnsibleError("cannot run the interpreter '%s' on the psrp "
+ "connection plugin" % interpreter)
+
+ # call build_module_command to get the bootstrap wrapper text
+ bootstrap_wrapper = self._shell.build_module_command('', '', '')
+ if bootstrap_wrapper == cmd:
+ # Do not display to the user each invocation of the bootstrap wrapper
+ display.vvv("PSRP: EXEC (via pipeline wrapper)")
+ else:
+ display.vvv("PSRP: EXEC %s" % script, host=self._psrp_host)
+ else:
+ # In other cases we want to execute the cmd as the script. We add on the 'exit $LASTEXITCODE' to ensure the
+ # rc is propagated back to the connection plugin.
+ script = to_text(u"%s\nexit $LASTEXITCODE" % cmd)
+ display.vvv(u"PSRP: EXEC %s" % script, host=self._psrp_host)
+
+ rc, stdout, stderr = self._exec_psrp_script(script, in_data)
+ return rc, stdout, stderr
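+
+ # Illustrative example: exec_command('whoami') takes the non-encoded branch
+ # above, runs the script "whoami\nexit $LASTEXITCODE" in the existing
+ # runspace, and returns (rc, stdout, stderr).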
+
+ def put_file(self, in_path, out_path):
+ super(Connection, self).put_file(in_path, out_path)
+
+ out_path = self._shell._unquote(out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._psrp_host)
+
+ copy_script = '''begin {
+ $ErrorActionPreference = "Stop"
+ $WarningPreference = "Continue"
+ $path = $MyInvocation.UnboundArguments[0]
+ $fd = [System.IO.File]::Create($path)
+ $algo = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
+ $bytes = @()
+
+ $bindingFlags = [System.Reflection.BindingFlags]'NonPublic, Instance'
+ Function Get-Property {
+ <#
+ .SYNOPSIS
+ Gets the private/internal property specified of the object passed in.
+ #>
+ Param (
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)]
+ [System.Object]
+ $Object,
+
+ [Parameter(Mandatory=$true, Position=1)]
+ [System.String]
+ $Name
+ )
+
+ $Object.GetType().GetProperty($Name, $bindingFlags).GetValue($Object, $null)
+ }
+
+ Function Set-Property {
+ <#
+ .SYNOPSIS
+ Sets the private/internal property specified on the object passed in.
+ #>
+ Param (
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)]
+ [System.Object]
+ $Object,
+
+ [Parameter(Mandatory=$true, Position=1)]
+ [System.String]
+ $Name,
+
+ [Parameter(Mandatory=$true, Position=2)]
+ [AllowNull()]
+ [System.Object]
+ $Value
+ )
+
+ $Object.GetType().GetProperty($Name, $bindingFlags).SetValue($Object, $Value, $null)
+ }
+
+ Function Get-Field {
+ <#
+ .SYNOPSIS
+ Gets the private/internal field specified of the object passed in.
+ #>
+ Param (
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)]
+ [System.Object]
+ $Object,
+
+ [Parameter(Mandatory=$true, Position=1)]
+ [System.String]
+ $Name
+ )
+
+ $Object.GetType().GetField($Name, $bindingFlags).GetValue($Object)
+ }
+
+ # MaximumAllowedMemory is required to be set so we can send input data that exceeds the limit on a PS
+ # Runspace. We use reflection to access/set this property as it is not accessible publicly. This is not ideal
+ # but works on all PowerShell versions I've tested with. We originally used WinRS to send the raw bytes to the
+ # host but this falls flat if someone is using a custom PS configuration name so this is a workaround. This
+ # isn't required for smaller files so if it fails we ignore the error and hope it wasn't needed.
+ # https://github.com/PowerShell/PowerShell/blob/c8e72d1e664b1ee04a14f226adf655cced24e5f0/src/System.Management.Automation/engine/serialization.cs#L325
+ try {
+ $Host | Get-Property 'ExternalHost' | `
+ Get-Field '_transportManager' | `
+ Get-Property 'Fragmentor' | `
+ Get-Property 'DeserializationContext' | `
+ Set-Property 'MaximumAllowedMemory' $null
+ } catch {}
+}
+process {
+ $bytes = [System.Convert]::FromBase64String($input)
+ $algo.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) > $null
+ $fd.Write($bytes, 0, $bytes.Length)
+}
+end {
+ $fd.Close()
+
+ $algo.TransformFinalBlock($bytes, 0, 0) > $null
+ $hash = [System.BitConverter]::ToString($algo.Hash).Replace('-', '').ToLowerInvariant()
+ Write-Output -InputObject "{`"sha1`":`"$hash`"}"
+}
+'''
+
+ # Get the buffer size of each fragment to send, subtracting 82 for the fragment, message, and other header
+ # fields that PSRP adds. Then adjust for the overhead of base64 encoding (4 characters per 3 raw bytes).
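+ # e.g. assuming a hypothetical 150 KiB (153600 byte) max payload: int((153600 - 82) / 4 * 3) == 115138 raw bytes per fragment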
+ buffer_size = int((self.runspace.connection.max_payload_size - 82) / 4 * 3)
+
+ sha1_hash = sha1()
+
+ b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
+ if not os.path.exists(b_in_path):
+ raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
+
+ def read_gen():
+ offset = 0
+
+ with open(b_in_path, 'rb') as src_fd:
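+ # iter(callable, sentinel): keep calling src_fd.read(buffer_size) until it returns b"" at EOF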
+ for b_data in iter((lambda: src_fd.read(buffer_size)), b""):
+ data_len = len(b_data)
+ offset += data_len
+ sha1_hash.update(b_data)
+
+ # PSRP technically supports sending raw bytes but that method requires a larger CLIXML message.
+ # Sending base64 is still more efficient here.
+ display.vvvvv("PSRP PUT %s to %s (offset=%d, size=%d" % (in_path, out_path, offset, data_len),
+ host=self._psrp_host)
+ b64_data = base64.b64encode(b_data)
+ yield [to_text(b64_data)]
+
+ if offset == 0: # empty file
+ yield [""]
+
+ rc, stdout, stderr = self._exec_psrp_script(copy_script, read_gen(), arguments=[out_path])
+
+ if rc != 0:
+ raise AnsibleError(to_native(stderr))
+
+ put_output = json.loads(to_text(stdout))
+ local_sha1 = sha1_hash.hexdigest()
+ remote_sha1 = put_output.get("sha1")
+
+ if not remote_sha1:
+ raise AnsibleError("Remote sha1 was not returned, stdout: '%s', stderr: '%s'"
+ % (to_native(stdout), to_native(stderr)))
+
+ if remote_sha1 != local_sha1:
+ raise AnsibleError("Remote sha1 hash %s does not match local hash %s"
+ % (to_native(remote_sha1), to_native(local_sha1)))
+
+ def fetch_file(self, in_path, out_path):
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path),
+ host=self._psrp_host)
+
+ in_path = self._shell._unquote(in_path)
+ out_path = out_path.replace('\\', '/')
+
+ # because we are dealing with base64 data we need to work out the max
+ # number of raw bytes whose base64 form still fits in the payload
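+ # e.g. assuming the same hypothetical 150 KiB payload: max_b64_size = 153600 - 115200 = 38400; buffer_size = 38400 - 512 = 37888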
+ max_b64_size = int(self.runspace.connection.max_payload_size -
+ (self.runspace.connection.max_payload_size / 4 * 3))
+ buffer_size = max_b64_size - (max_b64_size % 1024)
+
+ # setup the file stream with read only mode
+ setup_script = '''$ErrorActionPreference = "Stop"
+$path = '%s'
+
+if (Test-Path -Path $path -PathType Leaf) {
+ $fs = New-Object -TypeName System.IO.FileStream -ArgumentList @(
+ $path,
+ [System.IO.FileMode]::Open,
+ [System.IO.FileAccess]::Read,
+ [System.IO.FileShare]::Read
+ )
+ $buffer_size = %d
+} elseif (Test-Path -Path $path -PathType Container) {
+ Write-Output -InputObject "[DIR]"
+} else {
+ Write-Error -Message "$path does not exist"
+ $host.SetShouldExit(1)
+}''' % (self._shell._escape(in_path), buffer_size)
+
+ # read the file stream at the offset and return the b64 string
+ read_script = '''$ErrorActionPreference = "Stop"
+$fs.Seek(%d, [System.IO.SeekOrigin]::Begin) > $null
+$buffer = New-Object -TypeName byte[] -ArgumentList $buffer_size
+$bytes_read = $fs.Read($buffer, 0, $buffer_size)
+
+if ($bytes_read -gt 0) {
+ $bytes = $buffer[0..($bytes_read - 1)]
+ Write-Output -InputObject ([System.Convert]::ToBase64String($bytes))
+}'''
+
+ # need to run the setup script outside of the local scope so the
+ # file stream stays active between fetch operations
+ rc, stdout, stderr = self._exec_psrp_script(setup_script,
+ use_local_scope=False)
+ if rc != 0:
+ raise AnsibleError("failed to setup file stream for fetch '%s': %s"
+ % (out_path, to_native(stderr)))
+ elif stdout.strip() == '[DIR]':
+ # to be consistent with other connection plugins, we assume the caller has created the target dir
+ return
+
+ b_out_path = to_bytes(out_path, errors='surrogate_or_strict')
+ offset = 0
+ with open(b_out_path, 'wb') as out_file:
+ while True:
+ display.vvvvv("PSRP FETCH %s to %s (offset=%d" %
+ (in_path, out_path, offset), host=self._psrp_host)
+ rc, stdout, stderr = self._exec_psrp_script(read_script % offset)
+ if rc != 0:
+ raise AnsibleError("failed to transfer file to '%s': %s"
+ % (out_path, to_native(stderr)))
+
+ data = base64.b64decode(stdout.strip())
+ out_file.write(data)
+ if len(data) < buffer_size:
+ break
+ offset += len(data)
+
+ rc, stdout, stderr = self._exec_psrp_script("$fs.Close()")
+ if rc != 0:
+ display.warning("failed to close remote file stream of file "
+ "'%s': %s" % (in_path, to_native(stderr)))
+
+ def close(self):
+ if self.runspace and self.runspace.state == RunspacePoolState.OPENED:
+ display.vvvvv("PSRP CLOSE RUNSPACE: %s" % (self.runspace.id),
+ host=self._psrp_host)
+ self.runspace.close()
+ self.runspace = None
+ self._connected = False
+ self._last_pipeline = None
+
+ def _build_kwargs(self):
+ self._psrp_host = self.get_option('remote_addr')
+ self._psrp_user = self.get_option('remote_user')
+ self._psrp_pass = self.get_option('remote_password')
+
+ protocol = self.get_option('protocol')
+ port = self.get_option('port')
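+ # If neither option is set we default to HTTPS on 5986; otherwise each
+ # side is derived from the other, e.g. port=5985 implies http and
+ # protocol='http' implies port 5985.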
+ if protocol is None and port is None:
+ protocol = 'https'
+ port = 5986
+ elif protocol is None:
+ protocol = 'https' if int(port) != 5985 else 'http'
+ elif port is None:
+ port = 5986 if protocol == 'https' else 5985
+
+ self._psrp_protocol = protocol
+ self._psrp_port = int(port)
+
+ self._psrp_path = self.get_option('path')
+ self._psrp_auth = self.get_option('auth')
+ # cert validation can either be a bool or a path to the cert
+ cert_validation = self.get_option('cert_validation')
+ cert_trust_path = self.get_option('ca_cert')
+ if cert_validation == 'ignore':
+ self._psrp_cert_validation = False
+ elif cert_trust_path is not None:
+ self._psrp_cert_validation = cert_trust_path
+ else:
+ self._psrp_cert_validation = True
+
+ self._psrp_connection_timeout = self.get_option('connection_timeout') # Can be None
+ self._psrp_read_timeout = self.get_option('read_timeout') # Can be None
+ self._psrp_message_encryption = self.get_option('message_encryption')
+ self._psrp_proxy = self.get_option('proxy')
+ self._psrp_ignore_proxy = boolean(self.get_option('ignore_proxy'))
+ self._psrp_operation_timeout = int(self.get_option('operation_timeout'))
+ self._psrp_max_envelope_size = int(self.get_option('max_envelope_size'))
+ self._psrp_configuration_name = self.get_option('configuration_name')
+ self._psrp_reconnection_retries = int(self.get_option('reconnection_retries'))
+ self._psrp_reconnection_backoff = float(self.get_option('reconnection_backoff'))
+
+ self._psrp_certificate_key_pem = self.get_option('certificate_key_pem')
+ self._psrp_certificate_pem = self.get_option('certificate_pem')
+ self._psrp_credssp_auth_mechanism = self.get_option('credssp_auth_mechanism')
+ self._psrp_credssp_disable_tlsv1_2 = self.get_option('credssp_disable_tlsv1_2')
+ self._psrp_credssp_minimum_version = self.get_option('credssp_minimum_version')
+ self._psrp_negotiate_send_cbt = self.get_option('negotiate_send_cbt')
+ self._psrp_negotiate_delegate = self.get_option('negotiate_delegate')
+ self._psrp_negotiate_hostname_override = self.get_option('negotiate_hostname_override')
+ self._psrp_negotiate_service = self.get_option('negotiate_service')
+
+ supported_args = []
+ for auth_kwarg in AUTH_KWARGS.values():
+ supported_args.extend(auth_kwarg)
+ extra_args = {v.replace('ansible_psrp_', '') for v in self.get_option('_extras')}
+ unsupported_args = extra_args.difference(supported_args)
+
+ for arg in unsupported_args:
+ display.warning("ansible_psrp_%s is unsupported by the current "
+ "psrp version installed" % arg)
+
+ self._psrp_conn_kwargs = dict(
+ server=self._psrp_host, port=self._psrp_port,
+ username=self._psrp_user, password=self._psrp_pass,
+ ssl=self._psrp_protocol == 'https', path=self._psrp_path,
+ auth=self._psrp_auth, cert_validation=self._psrp_cert_validation,
+ connection_timeout=self._psrp_connection_timeout,
+ encryption=self._psrp_message_encryption, proxy=self._psrp_proxy,
+ no_proxy=self._psrp_ignore_proxy,
+ max_envelope_size=self._psrp_max_envelope_size,
+ operation_timeout=self._psrp_operation_timeout,
+ certificate_key_pem=self._psrp_certificate_key_pem,
+ certificate_pem=self._psrp_certificate_pem,
+ credssp_auth_mechanism=self._psrp_credssp_auth_mechanism,
+ credssp_disable_tlsv1_2=self._psrp_credssp_disable_tlsv1_2,
+ credssp_minimum_version=self._psrp_credssp_minimum_version,
+ negotiate_send_cbt=self._psrp_negotiate_send_cbt,
+ negotiate_delegate=self._psrp_negotiate_delegate,
+ negotiate_hostname_override=self._psrp_negotiate_hostname_override,
+ negotiate_service=self._psrp_negotiate_service,
+ )
+
+ # Check if PSRP version supports newer read_timeout argument (needs pypsrp 0.3.0+)
+ if hasattr(pypsrp, 'FEATURES') and 'wsman_read_timeout' in pypsrp.FEATURES:
+ self._psrp_conn_kwargs['read_timeout'] = self._psrp_read_timeout
+ elif self._psrp_read_timeout is not None:
+ display.warning("ansible_psrp_read_timeout is unsupported by the current psrp version installed, "
+ "using ansible_psrp_connection_timeout value for read_timeout instead.")
+
+ # Check if PSRP version supports newer reconnection_retries argument (needs pypsrp 0.3.0+)
+ if hasattr(pypsrp, 'FEATURES') and 'wsman_reconnections' in pypsrp.FEATURES:
+ self._psrp_conn_kwargs['reconnection_retries'] = self._psrp_reconnection_retries
+ self._psrp_conn_kwargs['reconnection_backoff'] = self._psrp_reconnection_backoff
+ else:
+ if self._psrp_reconnection_retries is not None:
+ display.warning("ansible_psrp_reconnection_retries is unsupported by the current psrp version installed.")
+ if self._psrp_reconnection_backoff is not None:
+ display.warning("ansible_psrp_reconnection_backoff is unsupported by the current psrp version installed.")
+
+ # add in the extra args that were set
+ for arg in extra_args.intersection(supported_args):
+ option = self.get_option('_extras')['ansible_psrp_%s' % arg]
+ self._psrp_conn_kwargs[arg] = option
+
+ def _exec_psrp_script(self, script, input_data=None, use_local_scope=True, arguments=None):
+ # Check if there's a command on the current pipeline that still needs to be closed.
+ if self._last_pipeline:
+ # Current pypsrp versions raise an exception if the current state was not RUNNING. We manually set it so we
+ # can call stop without any issues.
+ self._last_pipeline.state = PSInvocationState.RUNNING
+ self._last_pipeline.stop()
+ self._last_pipeline = None
+
+ ps = PowerShell(self.runspace)
+ ps.add_script(script, use_local_scope=use_local_scope)
+ if arguments:
+ for arg in arguments:
+ ps.add_argument(arg)
+
+ ps.invoke(input=input_data)
+
+ rc, stdout, stderr = self._parse_pipeline_result(ps)
+
+ # We should really call .stop() on all pipelines that are run to decrement the concurrent command counter on
+ # PSSession but that involves another round trip and is done when the runspace is closed. We instead store the
+ # last pipeline which is closed if another command is run on the runspace.
+ self._last_pipeline = ps
+
+ return rc, stdout, stderr
+
+ def _parse_pipeline_result(self, pipeline):
+ """
+ PSRP does not present its output the same way as other protocols.
+ We need some extra logic to convert the pipeline streams and host
+ output into the format that Ansible understands.
+
+ :param pipeline: The finished PowerShell pipeline that invoked our
+ commands
+ :return: rc, stdout, stderr based on the pipeline output
+ """
+ # We try to get the rc from our host implementation; this is set if
+ # exit or $host.SetShouldExit() is called in our pipeline. If not, we
+ # set it to 0 if the pipeline had no errors and 1 if it did.
+ rc = self.host.rc or (1 if pipeline.had_errors else 0)
+
+ # TODO: figure out a better way of merging this with the host output
+ stdout_list = []
+ for output in pipeline.output:
+ # Not all pipeline outputs are a string or contain a __str__ value;
+ # in that case we create our own output based on the properties of
+ # the complex object.
+ if isinstance(output, GenericComplexObject) and output.to_string is None:
+ obj_lines = output.property_sets
+ for key, value in output.adapted_properties.items():
+ obj_lines.append(u"%s: %s" % (key, value))
+ for key, value in output.extended_properties.items():
+ obj_lines.append(u"%s: %s" % (key, value))
+ output_msg = u"\n".join(obj_lines)
+ else:
+ output_msg = to_text(output, nonstring='simplerepr')
+
+ stdout_list.append(output_msg)
+
+ if len(self.host.ui.stdout) > 0:
+ stdout_list += self.host.ui.stdout
+ stdout = u"\r\n".join(stdout_list)
+
+ stderr_list = []
+ for error in pipeline.streams.error:
+ # the error record is not as fully fleshed out as what we usually get
+ # in PS, so we manually create it here
+ command_name = "%s : " % error.command_name if error.command_name else ''
+ position = "%s\r\n" % error.invocation_position_message if error.invocation_position_message else ''
+ error_msg = "%s%s\r\n%s" \
+ " + CategoryInfo : %s\r\n" \
+ " + FullyQualifiedErrorId : %s" \
+ % (command_name, str(error), position,
+ error.message, error.fq_error)
+ stacktrace = error.script_stacktrace
+ if display.verbosity >= 3 and stacktrace is not None:
+ error_msg += "\r\nStackTrace:\r\n%s" % stacktrace
+ stderr_list.append(error_msg)
+
+ if len(self.host.ui.stderr) > 0:
+ stderr_list += self.host.ui.stderr
+ stderr = u"\r\n".join([to_text(o) for o in stderr_list])
+
+ display.vvvvv("PSRP RC: %d" % rc, host=self._psrp_host)
+ display.vvvvv("PSRP STDOUT: %s" % stdout, host=self._psrp_host)
+ display.vvvvv("PSRP STDERR: %s" % stderr, host=self._psrp_host)
+
+ # reset the host output back to defaults, needed if running
+ # multiple pipelines on the same RunspacePool
+ self.host.rc = 0
+ self.host.ui.stdout = []
+ self.host.ui.stderr = []
+
+ return rc, to_bytes(stdout, encoding='utf-8'), to_bytes(stderr, encoding='utf-8')
diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py
new file mode 100644
index 0000000..e4d9628
--- /dev/null
+++ b/lib/ansible/plugins/connection/ssh.py
@@ -0,0 +1,1399 @@
+# Copyright (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
+# Copyright 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: ssh
+ short_description: connect via SSH client binary
+ description:
+ - This connection plugin allows Ansible to communicate with the target machines through the normal SSH command line.
+ - Ansible does not expose a channel to allow communication between the user and the SSH process to accept
+ a password manually to decrypt an SSH key when using this connection plugin (which is the default). The
+ use of C(ssh-agent) is highly recommended.
+ author: ansible (@core)
+ extends_documentation_fragment:
+ - connection_pipelining
+ version_added: historical
+ notes:
+ - Many options default to C(None) here but that only means we do not override the SSH tool's defaults and/or configuration.
+ For example, if you specify the port in this plugin it will override any C(Port) entry in your C(.ssh/config).
+ - The ssh CLI tool uses return code 255 as a 'connection error'; this can conflict with commands/tools that
+ also return 255 as an error code and will look like an 'unreachable' condition or 'connection error' to this plugin.
+ options:
+ host:
+ description: Hostname/IP to connect to.
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_ssh_host
+ - name: delegated_vars['ansible_host']
+ - name: delegated_vars['ansible_ssh_host']
+ host_key_checking:
+ description: Determines if SSH should check host keys.
+ default: True
+ type: boolean
+ ini:
+ - section: defaults
+ key: 'host_key_checking'
+ - section: ssh_connection
+ key: 'host_key_checking'
+ version_added: '2.5'
+ env:
+ - name: ANSIBLE_HOST_KEY_CHECKING
+ - name: ANSIBLE_SSH_HOST_KEY_CHECKING
+ version_added: '2.5'
+ vars:
+ - name: ansible_host_key_checking
+ version_added: '2.5'
+ - name: ansible_ssh_host_key_checking
+ version_added: '2.5'
+ password:
+ description: Authentication password for the C(remote_user). Can be supplied as CLI option.
+ vars:
+ - name: ansible_password
+ - name: ansible_ssh_pass
+ - name: ansible_ssh_password
+ sshpass_prompt:
+ description:
+ - Password prompt that sshpass should search for. Supported by sshpass 1.06 and up.
+ - Defaults to C(Enter PIN for) when pkcs11_provider is set.
+ default: ''
+ ini:
+ - section: 'ssh_connection'
+ key: 'sshpass_prompt'
+ env:
+ - name: ANSIBLE_SSHPASS_PROMPT
+ vars:
+ - name: ansible_sshpass_prompt
+ version_added: '2.10'
+ ssh_args:
+ description: Arguments to pass to all SSH CLI tools.
+ default: '-C -o ControlMaster=auto -o ControlPersist=60s'
+ ini:
+ - section: 'ssh_connection'
+ key: 'ssh_args'
+ env:
+ - name: ANSIBLE_SSH_ARGS
+ vars:
+ - name: ansible_ssh_args
+ version_added: '2.7'
+ ssh_common_args:
+ description: Common extra args for all SSH CLI tools.
+ ini:
+ - section: 'ssh_connection'
+ key: 'ssh_common_args'
+ version_added: '2.7'
+ env:
+ - name: ANSIBLE_SSH_COMMON_ARGS
+ version_added: '2.7'
+ vars:
+ - name: ansible_ssh_common_args
+ cli:
+ - name: ssh_common_args
+ default: ''
+ ssh_executable:
+ default: ssh
+ description:
+ - This defines the location of the SSH binary. It defaults to C(ssh) which will use the first SSH binary available in $PATH.
+ - This option is usually not required, it might be useful when access to system SSH is restricted,
+ or when using SSH wrappers to connect to remote hosts.
+ env: [{name: ANSIBLE_SSH_EXECUTABLE}]
+ ini:
+ - {key: ssh_executable, section: ssh_connection}
+ #const: ANSIBLE_SSH_EXECUTABLE
+ version_added: "2.2"
+ vars:
+ - name: ansible_ssh_executable
+ version_added: '2.7'
+ sftp_executable:
+ default: sftp
+ description:
+ - This defines the location of the sftp binary. It defaults to C(sftp) which will use the first binary available in $PATH.
+ env: [{name: ANSIBLE_SFTP_EXECUTABLE}]
+ ini:
+ - {key: sftp_executable, section: ssh_connection}
+ version_added: "2.6"
+ vars:
+ - name: ansible_sftp_executable
+ version_added: '2.7'
+ scp_executable:
+ default: scp
+ description:
+ - This defines the location of the scp binary. It defaults to C(scp) which will use the first binary available in $PATH.
+ env: [{name: ANSIBLE_SCP_EXECUTABLE}]
+ ini:
+ - {key: scp_executable, section: ssh_connection}
+ version_added: "2.6"
+ vars:
+ - name: ansible_scp_executable
+ version_added: '2.7'
+ scp_extra_args:
+ description: Extra arguments exclusive to the C(scp) CLI.
+ vars:
+ - name: ansible_scp_extra_args
+ env:
+ - name: ANSIBLE_SCP_EXTRA_ARGS
+ version_added: '2.7'
+ ini:
+ - key: scp_extra_args
+ section: ssh_connection
+ version_added: '2.7'
+ cli:
+ - name: scp_extra_args
+ default: ''
+ sftp_extra_args:
+ description: Extra arguments exclusive to the C(sftp) CLI.
+ vars:
+ - name: ansible_sftp_extra_args
+ env:
+ - name: ANSIBLE_SFTP_EXTRA_ARGS
+ version_added: '2.7'
+ ini:
+ - key: sftp_extra_args
+ section: ssh_connection
+ version_added: '2.7'
+ cli:
+ - name: sftp_extra_args
+ default: ''
+ ssh_extra_args:
+ description: Extra arguments exclusive to the SSH CLI.
+ vars:
+ - name: ansible_ssh_extra_args
+ env:
+ - name: ANSIBLE_SSH_EXTRA_ARGS
+ version_added: '2.7'
+ ini:
+ - key: ssh_extra_args
+ section: ssh_connection
+ version_added: '2.7'
+ cli:
+ - name: ssh_extra_args
+ default: ''
+ reconnection_retries:
+ description:
+ - Number of attempts to connect.
+ - Ansible retries connections only if it gets an SSH error with a return code of 255.
+ - Any errors with return codes other than 255 indicate an issue with program execution.
+ default: 0
+ type: integer
+ env:
+ - name: ANSIBLE_SSH_RETRIES
+ ini:
+ - section: connection
+ key: retries
+ - section: ssh_connection
+ key: retries
+ vars:
+ - name: ansible_ssh_retries
+ version_added: '2.7'
+ port:
+ description: Remote port to connect to.
+ type: int
+ ini:
+ - section: defaults
+ key: remote_port
+ env:
+ - name: ANSIBLE_REMOTE_PORT
+ vars:
+ - name: ansible_port
+ - name: ansible_ssh_port
+ keyword:
+ - name: port
+ remote_user:
+ description:
+ - User name with which to login to the remote server, normally set by the remote_user keyword.
+ - If no user is supplied, Ansible will let the SSH client binary choose the user as it normally would.
+ ini:
+ - section: defaults
+ key: remote_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ vars:
+ - name: ansible_user
+ - name: ansible_ssh_user
+ cli:
+ - name: user
+ keyword:
+ - name: remote_user
+ pipelining:
+ env:
+ - name: ANSIBLE_PIPELINING
+ - name: ANSIBLE_SSH_PIPELINING
+ ini:
+ - section: defaults
+ key: pipelining
+ - section: connection
+ key: pipelining
+ - section: ssh_connection
+ key: pipelining
+ vars:
+ - name: ansible_pipelining
+ - name: ansible_ssh_pipelining
+
+ private_key_file:
+ description:
+ - Path to private key file to use for authentication.
+ ini:
+ - section: defaults
+ key: private_key_file
+ env:
+ - name: ANSIBLE_PRIVATE_KEY_FILE
+ vars:
+ - name: ansible_private_key_file
+ - name: ansible_ssh_private_key_file
+ cli:
+ - name: private_key_file
+ option: '--private-key'
+
+ control_path:
+ description:
+ - This is the location to save SSH's ControlPath sockets; it uses SSH's variable substitution.
+ - Since 2.3, if null (default), ansible will generate a unique hash. Use ``%(directory)s`` to indicate where to use the control dir path setting.
+ - Before 2.3 it defaulted to ``control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r``.
+ - Be aware that this setting is ignored if C(-o ControlPath) is set in ssh args.
+ env:
+ - name: ANSIBLE_SSH_CONTROL_PATH
+ ini:
+ - key: control_path
+ section: ssh_connection
+ vars:
+ - name: ansible_control_path
+ version_added: '2.7'
+ control_path_dir:
+ default: ~/.ansible/cp
+ description:
+ - This sets the directory to use for ssh control path if the control path setting is null.
+ - Also, provides the ``%(directory)s`` variable for the control path setting.
+ env:
+ - name: ANSIBLE_SSH_CONTROL_PATH_DIR
+ ini:
+ - section: ssh_connection
+ key: control_path_dir
+ vars:
+ - name: ansible_control_path_dir
+ version_added: '2.7'
+ sftp_batch_mode:
+ default: 'yes'
+ description: 'TODO: write it'
+ env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
+ ini:
+ - {key: sftp_batch_mode, section: ssh_connection}
+ type: bool
+ vars:
+ - name: ansible_sftp_batch_mode
+ version_added: '2.7'
+ ssh_transfer_method:
+ description:
+ - "Preferred method to use when transferring files over ssh"
+ - Setting to 'smart' (default) will try them in order, until one succeeds or they all fail
+ - For OpenSSH >=9.0 you must add an additional option to enable scp (scp_extra_args="-O")
+ - Using 'piped' creates an ssh pipe with C(dd) on either side to copy the data
+ choices: ['sftp', 'scp', 'piped', 'smart']
+ env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}]
+ ini:
+ - {key: transfer_method, section: ssh_connection}
+ vars:
+ - name: ansible_ssh_transfer_method
+ version_added: '2.12'
+ scp_if_ssh:
+ deprecated:
+ why: In favor of the "ssh_transfer_method" option.
+ version: "2.17"
+ alternatives: ssh_transfer_method
+ default: smart
+ description:
+ - "Preferred method to use when transferring files over SSH."
+ - When set to I(smart), Ansible will try them until one succeeds or they all fail.
+ - If set to I(True), it will force 'scp', if I(False) it will use 'sftp'.
+ - For OpenSSH >=9.0 you must add an additional option to enable scp (scp_extra_args="-O")
+ - This setting will be overridden by ssh_transfer_method if set.
+ env: [{name: ANSIBLE_SCP_IF_SSH}]
+ ini:
+ - {key: scp_if_ssh, section: ssh_connection}
+ vars:
+ - name: ansible_scp_if_ssh
+ version_added: '2.7'
+ use_tty:
+ version_added: '2.5'
+ default: 'yes'
+ description: Add -tt to ssh commands to force tty allocation.
+ env: [{name: ANSIBLE_SSH_USETTY}]
+ ini:
+ - {key: usetty, section: ssh_connection}
+ type: bool
+ vars:
+ - name: ansible_ssh_use_tty
+ version_added: '2.7'
+ timeout:
+ default: 10
+ description:
+ - This is the default amount of time we will wait while establishing an SSH connection.
+ - It also controls how long we wait when reading from the connection once it is established (select on the socket).
+ env:
+ - name: ANSIBLE_TIMEOUT
+ - name: ANSIBLE_SSH_TIMEOUT
+ version_added: '2.11'
+ ini:
+ - key: timeout
+ section: defaults
+ - key: timeout
+ section: ssh_connection
+ version_added: '2.11'
+ vars:
+ - name: ansible_ssh_timeout
+ version_added: '2.11'
+ cli:
+ - name: timeout
+ type: integer
+ pkcs11_provider:
+ version_added: '2.12'
+ default: ""
+ description:
+ - "PKCS11 SmartCard provider such as opensc, example: /usr/local/lib/opensc-pkcs11.so"
+ - Requires sshpass version 1.06+, sshpass must support the -P option.
+ env: [{name: ANSIBLE_PKCS11_PROVIDER}]
+ ini:
+ - {key: pkcs11_provider, section: ssh_connection}
+ vars:
+ - name: ansible_ssh_pkcs11_provider
+'''
+
+import errno
+import fcntl
+import hashlib
+import os
+import pty
+import re
+import shlex
+import subprocess
+import time
+
+from functools import wraps
+from ansible.errors import (
+ AnsibleAuthenticationFailure,
+ AnsibleConnectionFailure,
+ AnsibleError,
+ AnsibleFileNotFound,
+)
+from ansible.errors import AnsibleOptionsError
+from ansible.module_utils.compat import selectors
+from ansible.module_utils.six import PY3, text_type, binary_type
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.parsing.convert_bool import BOOLEANS, boolean
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.plugins.shell.powershell import _parse_clixml
+from ansible.utils.display import Display
+from ansible.utils.path import unfrackpath, makedirs_safe
+
+display = Display()
+
+# Error messages that indicate the 255 return code is not from ssh itself.
+b_NOT_SSH_ERRORS = (b'Traceback (most recent call last):', # Python-2.6 when there's an exception
+ # while invoking a script via -m
+ b'PHP Parse error:', # PHP always returns with an error
+ b'chmod: invalid mode', # chmod, but really only on AIX
+ b'chmod: A flag or octal number is not correct.', # chmod, other AIX
+ )
+
+SSHPASS_AVAILABLE = None
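+# matches ssh client debug output lines, e.g. "debug1: Reading configuration data /etc/ssh/ssh_config"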
+SSH_DEBUG = re.compile(r'^debug\d+: .*')
+
+
+class AnsibleControlPersistBrokenPipeError(AnsibleError):
+ ''' ControlPersist broken pipe '''
+ pass
+
+
+def _handle_error(remaining_retries, command, return_tuple, no_log, host, display=display):
+
+ # sshpass errors
+ if command == b'sshpass':
+ # Error 5 is invalid/incorrect password. Raise an exception to prevent retries from locking the account.
+ if return_tuple[0] == 5:
+ msg = 'Invalid/incorrect username/password. Skipping remaining {0} retries to prevent account lockout:'.format(remaining_retries)
+ if remaining_retries <= 0:
+ msg = 'Invalid/incorrect password:'
+ if no_log:
+ msg = '{0} <error censored due to no log>'.format(msg)
+ else:
+ msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
+ raise AnsibleAuthenticationFailure(msg)
+
+ # sshpass return codes are 1-6. We handled 5 above, so this catches the other scenarios.
+ # No exception is raised, so the connection is retried - except when attempting to use
+ # sshpass_prompt with an sshpass that won't let us pass -P, in which case we fail loudly.
+ elif return_tuple[0] in [1, 2, 3, 4, 6]:
+ msg = 'sshpass error:'
+ if no_log:
+ msg = '{0} <error censored due to no log>'.format(msg)
+ else:
+ details = to_native(return_tuple[2]).rstrip()
+ if "sshpass: invalid option -- 'P'" in details:
+ details = 'Installed sshpass version does not support customized password prompts. ' \
+ 'Upgrade sshpass to use sshpass_prompt, or otherwise switch to ssh keys.'
+ raise AnsibleError('{0} {1}'.format(msg, details))
+ msg = '{0} {1}'.format(msg, details)
+
+ if return_tuple[0] == 255:
+ SSH_ERROR = True
+ for signature in b_NOT_SSH_ERRORS:
+ # 1 == stdout, 2 == stderr
+ if signature in return_tuple[1] or signature in return_tuple[2]:
+ SSH_ERROR = False
+ break
+
+ if SSH_ERROR:
+ msg = "Failed to connect to the host via ssh:"
+ if no_log:
+ msg = '{0} <error censored due to no log>'.format(msg)
+ else:
+ msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
+ raise AnsibleConnectionFailure(msg)
+
+ # For other errors, no exception is raised so the connection is retried and we only log the messages
+ if 1 <= return_tuple[0] <= 254:
+ msg = u"Failed to connect to the host via ssh:"
+ if no_log:
+ msg = u'{0} <error censored due to no log>'.format(msg)
+ else:
+ msg = u'{0} {1}'.format(msg, to_text(return_tuple[2]).rstrip())
+ display.vvv(msg, host=host)
+
+
+def _ssh_retry(func):
+ """
+ Decorator to retry ssh/scp/sftp in the case of a connection failure
+
+ Will retry if:
+ * an exception is caught
+ * ssh returns 255
+ Will not retry if
+ * sshpass returns 5 (invalid password, to prevent account lockouts)
+ * remaining_tries is < 2
+ * retries limit reached
+ """
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ remaining_tries = int(self.get_option('reconnection_retries')) + 1
+ cmd_summary = u"%s..." % to_text(args[0])
+ conn_password = self.get_option('password') or self._play_context.password
+ for attempt in range(remaining_tries):
+ cmd = args[0]
+ if attempt != 0 and conn_password and isinstance(cmd, list):
+ # If this is a retry, the fd/pipe for sshpass is closed, and we need a new one
+ self.sshpass_pipe = os.pipe()
+ cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
+
+ try:
+ try:
+ return_tuple = func(self, *args, **kwargs)
+ # TODO: this should come from task
+ if self._play_context.no_log:
+ display.vvv(u'rc=%s, stdout and stderr censored due to no log' % return_tuple[0], host=self.host)
+ else:
+ display.vvv(return_tuple, host=self.host)
+ # 0 = success
+ # 1-254 = remote command return code
+ # 255 could be a failure from the ssh command itself
+ except (AnsibleControlPersistBrokenPipeError):
+ # Retry one more time because of the ControlPersist broken pipe (see #16731)
+ cmd = args[0]
+ if conn_password and isinstance(cmd, list):
+ # This is a retry, so the fd/pipe for sshpass is closed, and we need a new one
+ self.sshpass_pipe = os.pipe()
+ cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
+ display.vvv(u"RETRYING BECAUSE OF CONTROLPERSIST BROKEN PIPE")
+ return_tuple = func(self, *args, **kwargs)
+
+ remaining_retries = remaining_tries - attempt - 1
+ _handle_error(remaining_retries, cmd[0], return_tuple, self._play_context.no_log, self.host)
+
+ break
+
+ # 5 = Invalid/incorrect password from sshpass
+ except AnsibleAuthenticationFailure:
+ # Raising this exception, which is subclassed from AnsibleConnectionFailure, prevents further retries
+ raise
+
+ except (AnsibleConnectionFailure, Exception) as e:
+
+ if attempt == remaining_tries - 1:
+ raise
+ else:
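+ # exponential backoff capped at 30s: 0, 1, 3, 7, 15, 30, 30, ... seconds between attempts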
+ pause = 2 ** attempt - 1
+ if pause > 30:
+ pause = 30
+
+ if isinstance(e, AnsibleConnectionFailure):
+ msg = u"ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt + 1, cmd_summary, pause)
+ else:
+ msg = (u"ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), "
+ u"pausing for %d seconds" % (attempt + 1, to_text(e), cmd_summary, pause))
+
+ display.vv(msg, host=self.host)
+
+ time.sleep(pause)
+ continue
+
+ return return_tuple
+ return wrapped
+
+
+class Connection(ConnectionBase):
+ ''' ssh based connections '''
+
+ transport = 'ssh'
+ has_pipelining = True
+
+ def __init__(self, *args, **kwargs):
+ super(Connection, self).__init__(*args, **kwargs)
+
+ # TODO: all should come from get_option(), but they might not be set at this point yet
+ self.host = self._play_context.remote_addr
+ self.port = self._play_context.port
+ self.user = self._play_context.remote_user
+ self.control_path = None
+ self.control_path_dir = None
+
+ # Windows operates differently from a POSIX connection/shell plugin,
+ # we need to set various properties to ensure SSH on Windows continues
+ # to work
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ self.has_native_async = True
+ self.always_pipeline_modules = True
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+ self.allow_executable = False
+
+ # The connection is created by running ssh/scp/sftp from the exec_command,
+ # put_file, and fetch_file methods, so we don't need to do any connection
+ # management here.
+
+ def _connect(self):
+ return self
+
+ @staticmethod
+ def _create_control_path(host, port, user, connection=None, pid=None):
+ '''Make a hash for the controlpath based on con attributes'''
+ pstring = '%s-%s-%s' % (host, port, user)
+ if connection:
+ pstring += '-%s' % connection
+ if pid:
+ pstring += '-%s' % to_text(pid)
+ m = hashlib.sha1()
+ m.update(to_bytes(pstring))
+ digest = m.hexdigest()
+ cpath = '%(directory)s/' + digest[:10]
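+ # e.g. '%(directory)s/1b2d5e8f3a' (digest is illustrative); the %(directory)s
+ # placeholder is filled in later via cpath % dict(directory=cpdir)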
+ return cpath
+
+ @staticmethod
+ def _sshpass_available():
+ global SSHPASS_AVAILABLE
+
+ # We test once if sshpass is available, and remember the result. It
+ # would be nice to use distutils.spawn.find_executable for this, but
+ # distutils isn't always available; shutil.which() is Python3-only.
+
+ if SSHPASS_AVAILABLE is None:
+ try:
+ p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+ SSHPASS_AVAILABLE = True
+ except OSError:
+ SSHPASS_AVAILABLE = False
+
+ return SSHPASS_AVAILABLE
+
+ @staticmethod
+ def _persistence_controls(b_command):
+ '''
+ Takes a command array and scans it for ControlPersist and ControlPath
+ settings and returns two booleans indicating whether either was found.
+ This could be smarter, e.g. returning false if ControlPersist is 'no',
+ but for now we do it the simple way.
+ '''
+
+ controlpersist = False
+ controlpath = False
+
+ for b_arg in (a.lower() for a in b_command):
+ if b'controlpersist' in b_arg:
+ controlpersist = True
+ elif b'controlpath' in b_arg:
+ controlpath = True
+
+ return controlpersist, controlpath
+
+ def _add_args(self, b_command, b_args, explanation):
+ """
+ Adds arguments to the ssh command and displays a caller-supplied explanation of why.
+
+ :arg b_command: A list containing the command to add the new arguments to.
+ This list will be modified by this method.
+ :arg b_args: An iterable of new arguments to add. This iterable is used
+ more than once so it must be persistent (ie: a list is okay but a
+ StringIO would not)
+ :arg explanation: A text string explaining why the arguments
+ were added. It will be displayed at a high enough verbosity.
+ .. note:: This function does its work via side-effect. The b_command list has the new arguments appended.
+ """
+ display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self.host)
+ b_command += b_args
+
+ def _build_command(self, binary, subsystem, *other_args):
+ '''
+ Takes an executable (ssh, scp, sftp or wrapper) and optional extra arguments and returns the remote command
+ wrapped in local ssh shell commands and ready for execution.
+
+ :arg binary: actual executable to use to execute the command.
+ :arg subsystem: type of executable provided, ssh/sftp/scp, needed because wrappers for ssh might have different names.
+ :arg other_args: any additional arguments that are passed through to the ssh binary
+
+ '''
+
+ b_command = []
+ conn_password = self.get_option('password') or self._play_context.password
+
+ #
+ # First, the command to invoke
+ #
+
+ # If we want to use password authentication, we have to set up a pipe to
+ # write the password to sshpass.
+ pkcs11_provider = self.get_option("pkcs11_provider")
+ if conn_password or pkcs11_provider:
+ if not self._sshpass_available():
+ raise AnsibleError("to use the 'ssh' connection type with passwords or pkcs11_provider, you must install the sshpass program")
+ if not conn_password and pkcs11_provider:
+ raise AnsibleError("to use pkcs11_provider you must specify a password/pin")
+
+ self.sshpass_pipe = os.pipe()
+ b_command += [b'sshpass', b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')]
+
+ password_prompt = self.get_option('sshpass_prompt')
+ if not password_prompt and pkcs11_provider:
+ # Set default password prompt for pkcs11_provider to make it clear it's a PIN
+ password_prompt = 'Enter PIN for '
+
+ if password_prompt:
+ b_command += [b'-P', to_bytes(password_prompt, errors='surrogate_or_strict')]
+
+ b_command += [to_bytes(binary, errors='surrogate_or_strict')]
+
+ #
+ # Next, additional arguments based on the configuration.
+ #
+
+ # pkcs11 mode allows the use of Smartcards or Yubikey devices
+ if conn_password and pkcs11_provider:
+ self._add_args(b_command,
+ (b"-o", b"KbdInteractiveAuthentication=no",
+ b"-o", b"PreferredAuthentications=publickey",
+ b"-o", b"PasswordAuthentication=no",
+ b'-o', to_bytes(u'PKCS11Provider=%s' % pkcs11_provider)),
+ u'Enable pkcs11')
+
+ # sftp batch mode allows us to correctly catch failed transfers, but can
+ # be disabled if the client side doesn't support the option. However,
+ # sftp batch mode does not prompt for passwords so it must be disabled
+ # if not using controlpersist and using sshpass
+ if subsystem == 'sftp' and self.get_option('sftp_batch_mode'):
+ if conn_password:
+ b_args = [b'-o', b'BatchMode=no']
+ self._add_args(b_command, b_args, u'disable batch mode for sshpass')
+ b_command += [b'-b', b'-']
+
+ if display.verbosity > 3:
+ b_command.append(b'-vvv')
+
+ # Next, we add ssh_args
+ ssh_args = self.get_option('ssh_args')
+ if ssh_args:
+ b_args = [to_bytes(a, errors='surrogate_or_strict') for a in
+ self._split_ssh_args(ssh_args)]
+ self._add_args(b_command, b_args, u"ansible.cfg set ssh_args")
+
+ # Now we add various arguments that have their own specific settings defined in docs above.
+ if self.get_option('host_key_checking') is False:
+ b_args = (b"-o", b"StrictHostKeyChecking=no")
+ self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled")
+
+ self.port = self.get_option('port')
+ if self.port is not None:
+ b_args = (b"-o", b"Port=" + to_bytes(self.port, nonstring='simplerepr', errors='surrogate_or_strict'))
+ self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
+
+ key = self.get_option('private_key_file')
+ if key:
+ b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
+ self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
+
+ if not conn_password:
+ self._add_args(
+ b_command, (
+ b"-o", b"KbdInteractiveAuthentication=no",
+ b"-o", b"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
+ b"-o", b"PasswordAuthentication=no"
+ ),
+ u"ansible_password/ansible_ssh_password not set"
+ )
+
+ self.user = self.get_option('remote_user')
+ if self.user:
+ self._add_args(
+ b_command,
+ (b"-o", b'User="%s"' % to_bytes(self.user, errors='surrogate_or_strict')),
+ u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set"
+ )
+
+ timeout = self.get_option('timeout')
+ self._add_args(
+ b_command,
+ (b"-o", b"ConnectTimeout=" + to_bytes(timeout, errors='surrogate_or_strict', nonstring='simplerepr')),
+ u"ANSIBLE_TIMEOUT/timeout set"
+ )
+
+ # Add in any common or binary-specific arguments from the PlayContext
+ # (i.e. inventory or task settings or overrides on the command line).
+
+ for opt in (u'ssh_common_args', u'{0}_extra_args'.format(subsystem)):
+ attr = self.get_option(opt)
+ if attr is not None:
+ b_args = [to_bytes(a, errors='surrogate_or_strict') for a in self._split_ssh_args(attr)]
+ self._add_args(b_command, b_args, u"Set %s" % opt)
+
+ # Check if ControlPersist is enabled and add a ControlPath if one hasn't
+ # already been set.
+
+ controlpersist, controlpath = self._persistence_controls(b_command)
+
+ if controlpersist:
+ self._persistent = True
+
+ if not controlpath:
+ self.control_path_dir = self.get_option('control_path_dir')
+ cpdir = unfrackpath(self.control_path_dir)
+ b_cpdir = to_bytes(cpdir, errors='surrogate_or_strict')
+
+ # The directory must exist and be writable.
+ makedirs_safe(b_cpdir, 0o700)
+ if not os.access(b_cpdir, os.W_OK):
+ raise AnsibleError("Cannot write to ControlPath %s" % to_native(cpdir))
+
+ self.control_path = self.get_option('control_path')
+ if not self.control_path:
+ self.control_path = self._create_control_path(
+ self.host,
+ self.port,
+ self.user
+ )
+ b_args = (b"-o", b'ControlPath="%s"' % to_bytes(self.control_path % dict(directory=cpdir), errors='surrogate_or_strict'))
+ self._add_args(b_command, b_args, u"found only ControlPersist; added ControlPath")
+
+ # Finally, we add any caller-supplied extras.
+ if other_args:
+ b_command += [to_bytes(a) for a in other_args]
+
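+ # At this point b_command might look something like (illustrative values only):
+ #   ['sshpass', '-d12', 'ssh', '-C', '-o', 'ControlMaster=auto', '-o', 'ControlPersist=60s',
+ #    '-o', 'Port=2222', '-o', 'User="admin"', '-o', 'ConnectTimeout=10', '-o', 'ControlPath="..."']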
+ return b_command
+
+ def _send_initial_data(self, fh, in_data, ssh_process):
+ '''
+ Writes initial data to the stdin filehandle of the subprocess and closes
+ it. (The handle must be closed; otherwise, for example, "sftp -b -" will
+ just hang forever waiting for more commands.)
+ '''
+
+ display.debug(u'Sending initial data')
+
+ try:
+ fh.write(to_bytes(in_data))
+ fh.close()
+ except (OSError, IOError) as e:
+ # The ssh connection may have already terminated at this point, with a more useful error
+ # Only raise AnsibleConnectionFailure if the ssh process is still alive
+ time.sleep(0.001)
+ ssh_process.poll()
+ if getattr(ssh_process, 'returncode', None) is None:
+ raise AnsibleConnectionFailure(
+ 'Data could not be sent to remote host "%s". Make sure this host can be reached '
+ 'over ssh: %s' % (self.host, to_native(e)), orig_exc=e
+ )
+
+ display.debug(u'Sent initial data (%d bytes)' % len(in_data))
+
+ # Used by _run() to kill processes on failures
+ @staticmethod
+ def _terminate_process(p):
+ """ Terminate a process, ignoring errors """
+ try:
+ p.terminate()
+ except (OSError, IOError):
+ pass
+
+ # This is separate from _run() because we need to do the same thing for stdout
+ # and stderr.
+ def _examine_output(self, source, state, b_chunk, sudoable):
+ '''
+ Takes a string, extracts complete lines from it, tests to see if they
+ are a prompt, error message, etc., and sets appropriate flags in self.
+ Prompt and success lines are removed.
+
+ Returns the processed (i.e. possibly-edited) output and the unprocessed
+ remainder (to be processed with the next chunk) as strings.
+ '''
+
+ output = []
+ for b_line in b_chunk.splitlines(True):
+ display_line = to_text(b_line).rstrip('\r\n')
+ suppress_output = False
+
+ # display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, display_line))
+ if SSH_DEBUG.match(display_line):
+ # skip lines from ssh debug output to avoid false matches
+ pass
+ elif self.become.expect_prompt() and self.become.check_password_prompt(b_line):
+ display.debug(u"become_prompt: (source=%s, state=%s): '%s'" % (source, state, display_line))
+ self._flags['become_prompt'] = True
+ suppress_output = True
+ elif self.become.success and self.become.check_success(b_line):
+ display.debug(u"become_success: (source=%s, state=%s): '%s'" % (source, state, display_line))
+ self._flags['become_success'] = True
+ suppress_output = True
+ elif sudoable and self.become.check_incorrect_password(b_line):
+ display.debug(u"become_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
+ self._flags['become_error'] = True
+ elif sudoable and self.become.check_missing_password(b_line):
+ display.debug(u"become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
+ self._flags['become_nopasswd_error'] = True
+
+ if not suppress_output:
+ output.append(b_line)
+
+ # The chunk we read was most likely a series of complete lines, but just
+ # in case the last line was incomplete (and not a prompt, which we would
+ # have removed from the output), we retain it to be processed with the
+ # next chunk.
+
+ remainder = b''
+ if output and not output[-1].endswith(b'\n'):
+ remainder = output[-1]
+ output = output[:-1]
+
+ return b''.join(output), remainder
+
+ def _bare_run(self, cmd, in_data, sudoable=True, checkrc=True):
+ '''
+ Starts the command and communicates with it until it ends.
+ '''
+
+ # We don't use _shell.quote as this is run on the controller and independent from the shell plugin chosen
+ display_cmd = u' '.join(shlex.quote(to_text(c)) for c in cmd)
+ display.vvv(u'SSH: EXEC {0}'.format(display_cmd), host=self.host)
+
+ # Start the given command. If we don't need to pipeline data, we can try
+ # to use a pseudo-tty (ssh will have been invoked with -tt). If we are
+ # pipelining data, or can't create a pty, we fall back to using plain
+ # old pipes.
+
+ p = None
+
+ if isinstance(cmd, (text_type, binary_type)):
+ cmd = to_bytes(cmd)
+ else:
+ cmd = list(map(to_bytes, cmd))
+
+ conn_password = self.get_option('password') or self._play_context.password
+
+ if not in_data:
+ try:
+ # Make sure stdin is a proper pty to avoid tcgetattr errors
+ master, slave = pty.openpty()
+ if PY3 and conn_password:
+ # pylint: disable=unexpected-keyword-arg
+ p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
+ else:
+ p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdin = os.fdopen(master, 'wb', 0)
+ os.close(slave)
+ except (OSError, IOError):
+ p = None
+
+ if not p:
+ try:
+ if PY3 and conn_password:
+ # pylint: disable=unexpected-keyword-arg
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
+ else:
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdin = p.stdin
+ except (OSError, IOError) as e:
+ raise AnsibleError('Unable to execute ssh command line on a controller due to: %s' % to_native(e))
+
+ # If we are using SSH password authentication, write the password into
+ # the pipe we opened in _build_command.
+
+ if conn_password:
+ os.close(self.sshpass_pipe[0])
+ try:
+ os.write(self.sshpass_pipe[1], to_bytes(conn_password) + b'\n')
+ except OSError as e:
+ # Ignore broken pipe errors if the sshpass process has exited.
+ if e.errno != errno.EPIPE or p.poll() is None:
+ raise
+ os.close(self.sshpass_pipe[1])
+
+ #
+ # SSH state machine
+ #
+
+ # Now we read and accumulate output from the running process until it
+ # exits. Depending on the circumstances, we may also need to write an
+ # escalation password and/or pipelined input to the process.
+
+ states = [
+ 'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
+ ]
+
+ # Are we requesting privilege escalation? Right now, we may be invoked
+ # to execute sftp/scp with sudoable=True, but we can request escalation
+ # only when using ssh. Otherwise we can send initial data straightaway.
+
+ state = states.index('ready_to_send')
+ if to_bytes(self.get_option('ssh_executable')) in cmd and sudoable:
+ prompt = getattr(self.become, 'prompt', None)
+ if prompt:
+ # We're requesting escalation with a password, so we have to
+ # wait for a password prompt.
+ state = states.index('awaiting_prompt')
+ display.debug(u'Initial state: %s: %s' % (states[state], to_text(prompt)))
+ elif self.become and self.become.success:
+ # We're requesting escalation without a password, so we have to
+ # detect success/failure before sending any initial data.
+ state = states.index('awaiting_escalation')
+ display.debug(u'Initial state: %s: %s' % (states[state], to_text(self.become.success)))
+
+ # We store accumulated stdout and stderr output from the process here,
+ # but strip any privilege escalation prompt/confirmation lines first.
+ # Output is accumulated into tmp_*, complete lines are extracted into
+ # an array, then checked and removed or copied to stdout or stderr. We
+ # set any flags based on examining the output in self._flags.
+
+ b_stdout = b_stderr = b''
+ b_tmp_stdout = b_tmp_stderr = b''
+
+ self._flags = dict(
+ become_prompt=False, become_success=False,
+ become_error=False, become_nopasswd_error=False
+ )
+
+ # select timeout should be longer than the connect timeout, otherwise
+ # they will race each other when we can't connect, and the connect
+ # timeout usually fails
+ timeout = 2 + self.get_option('timeout')
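+ # put the stdout/stderr pipes into non-blocking mode so read() returns
+ # whatever is buffered instead of stalling the state machine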
+ for fd in (p.stdout, p.stderr):
+ fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # TODO: bcoca would like to use SelectSelector() here when possible;
+ # select is faster when the filehandle count is low and we only ever handle 1.
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)
+
+ # If we can send initial data without waiting for anything, we do so
+ # before we start polling
+ if states[state] == 'ready_to_send' and in_data:
+ self._send_initial_data(stdin, in_data, p)
+ state += 1
+
+ try:
+ while True:
+ poll = p.poll()
+ events = selector.select(timeout)
+
+ # We pay attention to timeouts only while negotiating a prompt.
+
+ if not events:
+ # We timed out
+ if state <= states.index('awaiting_escalation'):
+ # If the process has already exited, then it's not really a
+ # timeout; we'll let the normal error handling deal with it.
+ if poll is not None:
+ break
+ self._terminate_process(p)
+ raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout)))
+
+ # Read whatever output is available on stdout and stderr, and stop
+ # listening to the pipe if it's been closed.
+
+ for key, event in events:
+ if key.fileobj == p.stdout:
+ b_chunk = p.stdout.read()
+ if b_chunk == b'':
+ # stdout has been closed, stop watching it
+ selector.unregister(p.stdout)
+ # When ssh has ControlMaster (+ControlPath/Persist) enabled, the
+ # first connection goes into the background and we never see EOF
+ # on stderr. If we see EOF on stdout, lower the select timeout
+ # to reduce the time wasted selecting on stderr if we observe
+ # that the process has not yet exited after this EOF. Otherwise
+ # we may spend a long timeout period waiting for an EOF that is
+ # not going to arrive until the persisted connection closes.
+ timeout = 1
+ b_tmp_stdout += b_chunk
+ display.debug(u"stdout chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
+ elif key.fileobj == p.stderr:
+ b_chunk = p.stderr.read()
+ if b_chunk == b'':
+ # stderr has been closed, stop watching it
+ selector.unregister(p.stderr)
+ b_tmp_stderr += b_chunk
+ display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
+
+ # We examine the output line-by-line until we have negotiated any
+ # privilege escalation prompt and subsequent success/error message.
+ # Afterwards, we can accumulate output without looking at it.
+
+ if state < states.index('ready_to_send'):
+ if b_tmp_stdout:
+ b_output, b_unprocessed = self._examine_output('stdout', states[state], b_tmp_stdout, sudoable)
+ b_stdout += b_output
+ b_tmp_stdout = b_unprocessed
+
+ if b_tmp_stderr:
+ b_output, b_unprocessed = self._examine_output('stderr', states[state], b_tmp_stderr, sudoable)
+ b_stderr += b_output
+ b_tmp_stderr = b_unprocessed
+ else:
+ b_stdout += b_tmp_stdout
+ b_stderr += b_tmp_stderr
+ b_tmp_stdout = b_tmp_stderr = b''
+
+ # If we see a privilege escalation prompt, we send the password.
+ # (If we're expecting a prompt but the escalation succeeds, we
+ # didn't need the password and can carry on regardless.)
+
+ if states[state] == 'awaiting_prompt':
+ if self._flags['become_prompt']:
+ display.debug(u'Sending become_password in response to prompt')
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ # On python3 stdin is a BufferedWriter, and we don't have a guarantee
+ # that the write will happen without a flush
+ stdin.flush()
+ self._flags['become_prompt'] = False
+ state += 1
+ elif self._flags['become_success']:
+ state += 1
+
+ # We've requested escalation (with or without a password), now we
+ # wait for an error message or a successful escalation.
+
+ if states[state] == 'awaiting_escalation':
+ if self._flags['become_success']:
+ display.vvv(u'Escalation succeeded')
+ self._flags['become_success'] = False
+ state += 1
+ elif self._flags['become_error']:
+ display.vvv(u'Escalation failed')
+ self._terminate_process(p)
+ self._flags['become_error'] = False
+ raise AnsibleError('Incorrect %s password' % self.become.name)
+ elif self._flags['become_nopasswd_error']:
+ display.vvv(u'Escalation requires password')
+ self._terminate_process(p)
+ self._flags['become_nopasswd_error'] = False
+ raise AnsibleError('Missing %s password' % self.become.name)
+ elif self._flags['become_prompt']:
+ # This shouldn't happen, because we should see the "Sorry,
+ # try again" message first.
+ display.vvv(u'Escalation prompt repeated')
+ self._terminate_process(p)
+ self._flags['become_prompt'] = False
+ raise AnsibleError('Incorrect %s password' % self.become.name)
+
+ # Once we're sure that the privilege escalation prompt, if any, has
+ # been dealt with, we can send any initial data and start waiting
+ # for output.
+
+ if states[state] == 'ready_to_send':
+ if in_data:
+ self._send_initial_data(stdin, in_data, p)
+ state += 1
+
+ # Now we're awaiting_exit: has the child process exited? If it has,
+ # and we've read all available output from it, we're done.
+
+ if poll is not None:
+ if not selector.get_map() or not events:
+ break
+ # We should not see further writes to the stdout/stderr file
+ # descriptors after the process has closed, set the select
+ # timeout to gather any last writes we may have missed.
+ timeout = 0
+ continue
+
+ # If the process has not yet exited, but we've already read EOF from
+ # its stdout and stderr (and thus no longer watching any file
+ # descriptors), we can just wait for it to exit.
+
+ elif not selector.get_map():
+ p.wait()
+ break
+
+ # Otherwise there may still be outstanding data to read.
+ finally:
+ selector.close()
+ # close stdin, stdout, and stderr after process is terminated and
+ # stdout/stderr are read completely (see also issues #848, #64768).
+ stdin.close()
+ p.stdout.close()
+ p.stderr.close()
+
+ if self.get_option('host_key_checking'):
+ if cmd[0] == b"sshpass" and p.returncode == 6:
+ raise AnsibleError('Using an SSH password instead of a key is not possible because host key checking is enabled and sshpass does not support '
+ 'this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
+
+ controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
+ if p.returncode != 0 and controlpersisterror:
+ raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" '
+ '(or ssh_args in [ssh_connection] section of the config file) before running again')
+
+ # If we find a broken pipe because of ControlPersist timeout expiring (see #16731),
+ # we raise a special exception so that we can retry a connection.
+ controlpersist_broken_pipe = b'mux_client_hello_exchange: write packet: Broken pipe' in b_stderr
+ if p.returncode == 255:
+
+ additional = to_native(b_stderr)
+ if controlpersist_broken_pipe:
+ raise AnsibleControlPersistBrokenPipeError('Data could not be sent because of ControlPersist broken pipe: %s' % additional)
+
+ elif in_data and checkrc:
+ raise AnsibleConnectionFailure('Data could not be sent to remote host "%s". Make sure this host can be reached over ssh: %s'
+ % (self.host, additional))
+
+ return (p.returncode, b_stdout, b_stderr)
+
+ @_ssh_retry
+ def _run(self, cmd, in_data, sudoable=True, checkrc=True):
+ """Wrapper around _bare_run that retries the connection
+ """
+ return self._bare_run(cmd, in_data, sudoable=sudoable, checkrc=checkrc)
+
+ @_ssh_retry
+ def _file_transport_command(self, in_path, out_path, sftp_action):
+ # scp and sftp require square brackets for IPv6 addresses, but
+ # accept them for hostnames and IPv4 addresses too.
+ host = '[%s]' % self.host
+
+ smart_methods = ['sftp', 'scp', 'piped']
+
+ # Windows does not support dd so we cannot use the piped method
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ smart_methods.remove('piped')
+
+ # Transfer methods to try
+ methods = []
+
+ # Use the transfer_method option if set, otherwise use scp_if_ssh
+ ssh_transfer_method = self.get_option('ssh_transfer_method')
+ scp_if_ssh = self.get_option('scp_if_ssh')
+ if ssh_transfer_method is None and scp_if_ssh == 'smart':
+ ssh_transfer_method = 'smart'
+
+ if ssh_transfer_method is not None:
+ if ssh_transfer_method == 'smart':
+ methods = smart_methods
+ else:
+ methods = [ssh_transfer_method]
+ else:
+ # since this can be a non-bool now, we need to handle it correctly
+ if not isinstance(scp_if_ssh, bool):
+ scp_if_ssh = scp_if_ssh.lower()
+ if scp_if_ssh in BOOLEANS:
+ scp_if_ssh = boolean(scp_if_ssh, strict=False)
+ elif scp_if_ssh != 'smart':
+ raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')
+ if scp_if_ssh == 'smart':
+ methods = smart_methods
+ elif scp_if_ssh is True:
+ methods = ['scp']
+ else:
+ methods = ['sftp']
+
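+        # e.g. with ssh_transfer_method unset and scp_if_ssh left at 'smart' on
+        # a POSIX target, methods resolves to ['sftp', 'scp', 'piped'], tried in order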
+ for method in methods:
+ returncode = stdout = stderr = None
+ if method == 'sftp':
+ cmd = self._build_command(self.get_option('sftp_executable'), 'sftp', to_bytes(host))
+ in_data = u"{0} {1} {2}\n".format(sftp_action, shlex.quote(in_path), shlex.quote(out_path))
+ in_data = to_bytes(in_data, nonstring='passthru')
+ (returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
+ elif method == 'scp':
+ scp = self.get_option('scp_executable')
+
+ if sftp_action == 'get':
+ cmd = self._build_command(scp, 'scp', u'{0}:{1}'.format(host, self._shell.quote(in_path)), out_path)
+ else:
+ cmd = self._build_command(scp, 'scp', in_path, u'{0}:{1}'.format(host, self._shell.quote(out_path)))
+ in_data = None
+ (returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
+ elif method == 'piped':
+ if sftp_action == 'get':
+ # we pass sudoable=False to disable pty allocation, which
+ # would end up mixing stdout/stderr and screwing with newlines
+ (returncode, stdout, stderr) = self.exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), sudoable=False)
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
+ out_file.write(stdout)
+ else:
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as f:
+ in_data = to_bytes(f.read(), nonstring='passthru')
+ if not in_data:
+ count = ' count=0'
+ else:
+ count = ''
+ (returncode, stdout, stderr) = self.exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), in_data=in_data, sudoable=False)
+
+            # Check the return code and roll over to the next method if it failed
+ if returncode == 0:
+ return (returncode, stdout, stderr)
+ else:
+ # If not in smart mode, the data will be printed by the raise below
+ if len(methods) > 1:
+ display.warning(u'%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
+ display.debug(u'%s' % to_text(stdout))
+ display.debug(u'%s' % to_text(stderr))
+
+ if returncode == 255:
+ raise AnsibleConnectionFailure("Failed to connect to the host via %s: %s" % (method, to_native(stderr)))
+ else:
+ raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
+ (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
+
+ def _escape_win_path(self, path):
+ """ converts a Windows path to one that's supported by SFTP and SCP """
+ # If using a root path then we need to start with /
+ prefix = ""
+ if re.match(r'^\w{1}:', path):
+ prefix = "/"
+
+ # Convert all '\' to '/'
+ return "%s%s" % (prefix, path.replace("\\", "/"))
+
+ #
+ # Main public methods
+ #
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ ''' run a command on the remote host '''
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ self.host = self.get_option('host') or self._play_context.remote_addr
+
+ display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self.user), host=self.host)
+
+ if getattr(self._shell, "_IS_WINDOWS", False):
+            # The 'runas' become method is handled by the wrapper that is
+            # executed, so disable sudoable so that _bare_run is not left
+            # waiting for a prompt that will never occur
+ sudoable = False
+
+            # Make sure our first command sets the console encoding to UTF-8;
+            # this must be done via chcp to get UTF-8 (code page 65001)
+ cmd_parts = ["chcp.com", "65001", self._shell._SHELL_REDIRECT_ALLNULL, self._shell._SHELL_AND]
+ cmd_parts.extend(self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False))
+ cmd = ' '.join(cmd_parts)
+
+        # We can only use a tty when we are not pipelining the modules. Piping
+        # data into /usr/bin/python inside a tty automatically invokes the
+        # Python interactive mode, but the modules are not compatible with it
+        # ("unexpected indent" errors, mainly because of empty lines)
+
+ ssh_executable = self.get_option('ssh_executable')
+
+ # -tt can cause various issues in some environments so allow the user
+ # to disable it as a troubleshooting method.
+ use_tty = self.get_option('use_tty')
+
+ if not in_data and sudoable and use_tty:
+ args = ('-tt', self.host, cmd)
+ else:
+ args = (self.host, cmd)
+
+ cmd = self._build_command(ssh_executable, 'ssh', *args)
+ (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
+
+ # When running on Windows, stderr may contain CLIXML encoded output
+ if getattr(self._shell, "_IS_WINDOWS", False) and stderr.startswith(b"#< CLIXML"):
+ stderr = _parse_clixml(stderr)
+
+ return (returncode, stdout, stderr)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to remote '''
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ self.host = self.get_option('host') or self._play_context.remote_addr
+
+ display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
+
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ out_path = self._escape_win_path(out_path)
+
+ return self._file_transport_command(in_path, out_path, 'put')
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from remote to local '''
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ self.host = self.get_option('host') or self._play_context.remote_addr
+
+ display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
+
+ # need to add / if path is rooted
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ in_path = self._escape_win_path(in_path)
+
+ return self._file_transport_command(in_path, out_path, 'get')
+
+ def reset(self):
+
+ run_reset = False
+ self.host = self.get_option('host') or self._play_context.remote_addr
+
+        # If we have a persistent ssh connection (ControlPersist), we can ask it to stop listening.
+        # Only run the reset if the ControlPath already exists, or if it isn't configured and
+        # ControlPersist is set; the 'check' command below will determine this.
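+        # Roughly equivalent to: ssh -O check <host> && ssh -O stop <host>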
+ cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'check', self.host)
+ display.vvv(u'sending connection check: %s' % to_text(cmd))
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ status_code = p.wait()
+ if status_code != 0:
+ display.vvv(u"No connection to reset: %s" % to_text(stderr))
+ else:
+ run_reset = True
+
+ if run_reset:
+ cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'stop', self.host)
+ display.vvv(u'sending connection stop: %s' % to_text(cmd))
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ status_code = p.wait()
+ if status_code != 0:
+ display.warning(u"Failed to reset connection:%s" % to_text(stderr))
+
+ self.close()
+
+ def close(self):
+ self._connected = False
diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py
new file mode 100644
index 0000000..13c80ec
--- /dev/null
+++ b/lib/ansible/plugins/connection/winrm.py
@@ -0,0 +1,755 @@
+# (c) 2014, Chris Church <chris@ninemoreminutes.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ author: Ansible Core Team
+ name: winrm
+ short_description: Run tasks over Microsoft's WinRM
+ description:
+ - Run commands or put/fetch on a target via WinRM
+ - This plugin allows extra arguments to be passed that are supported by the protocol but not explicitly defined here.
+ They should take the form of variables declared with the following pattern C(ansible_winrm_<option>).
+ version_added: "2.0"
+ extends_documentation_fragment:
+ - connection_pipelining
+ requirements:
+ - pywinrm (python library)
+ options:
+ # figure out more elegant 'delegation'
+ remote_addr:
+ description:
+        - Address of the Windows machine
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_winrm_host
+ type: str
+ remote_user:
+ description:
+        - The user to log in as on the Windows machine
+ vars:
+ - name: ansible_user
+ - name: ansible_winrm_user
+ keyword:
+ - name: remote_user
+ type: str
+ remote_password:
+ description: Authentication password for the C(remote_user). Can be supplied as CLI option.
+ vars:
+ - name: ansible_password
+ - name: ansible_winrm_pass
+ - name: ansible_winrm_password
+ type: str
+ aliases:
+ - password # Needed for --ask-pass to come through on delegation
+ port:
+ description:
+        - The port WinRM will connect to on the remote target
+        - The default is the HTTPS port (5986); if using HTTP it should be 5985
+ vars:
+ - name: ansible_port
+ - name: ansible_winrm_port
+ default: 5986
+ keyword:
+ - name: port
+ type: integer
+ scheme:
+ description:
+ - URI scheme to use
+        - If not set, defaults to C(https), or to C(http) if I(port) is
+          C(5985).
+ choices: [http, https]
+ vars:
+ - name: ansible_winrm_scheme
+ type: str
+ path:
+ description: URI path to connect to
+ default: '/wsman'
+ vars:
+ - name: ansible_winrm_path
+ type: str
+ transport:
+ description:
+ - List of winrm transports to attempt to use (ssl, plaintext, kerberos, etc)
+ - If None (the default) the plugin will try to automatically guess the correct list
+ - The choices available depend on your version of pywinrm
+ type: list
+ elements: string
+ vars:
+ - name: ansible_winrm_transport
+ kerberos_command:
+      description: Kerberos command to use to request an authentication ticket
+ default: kinit
+ vars:
+ - name: ansible_winrm_kinit_cmd
+ type: str
+ kinit_args:
+ description:
+ - Extra arguments to pass to C(kinit) when getting the Kerberos authentication ticket.
+ - By default no extra arguments are passed into C(kinit) unless I(ansible_winrm_kerberos_delegation) is also
+ set. In that case C(-f) is added to the C(kinit) args so a forwardable ticket is retrieved.
+ - If set, the args will overwrite any existing defaults for C(kinit), including C(-f) for a delegated ticket.
+ type: str
+ vars:
+ - name: ansible_winrm_kinit_args
+ version_added: '2.11'
+ kinit_env_vars:
+ description:
+ - A list of environment variables to pass through to C(kinit) when getting the Kerberos authentication ticket.
+ - By default no environment variables are passed through and C(kinit) is run with a blank slate.
+ - The environment variable C(KRB5CCNAME) cannot be specified here as it's used to store the temp Kerberos
+ ticket used by WinRM.
+ type: list
+ elements: str
+ default: []
+ ini:
+ - section: winrm
+ key: kinit_env_vars
+ vars:
+ - name: ansible_winrm_kinit_env_vars
+ version_added: '2.12'
+ kerberos_mode:
+ description:
+        - Kerberos usage mode.
+        - The C(managed) option means Ansible will obtain a Kerberos ticket itself,
+          while C(manual) means a ticket must already have been obtained by the user.
+        - If Ansible freezes while trying to obtain the Kerberos ticket, you can
+          either set this to C(manual) and obtain the ticket outside Ansible, or
+          install C(pexpect) through pip and try again.
+ choices: [managed, manual]
+ vars:
+ - name: ansible_winrm_kinit_mode
+ type: str
+ connection_timeout:
+ description:
+ - Sets the operation and read timeout settings for the WinRM
+ connection.
+ - Corresponds to the C(operation_timeout_sec) and
+ C(read_timeout_sec) args in pywinrm so avoid setting these vars
+ with this one.
+ - The default value is whatever is set in the installed version of
+ pywinrm.
+ vars:
+ - name: ansible_winrm_connection_timeout
+ type: int
+"""
+
+import base64
+import logging
+import os
+import re
+import traceback
+import json
+import tempfile
+import shlex
+import subprocess
+
+from inspect import getfullargspec
+from urllib.parse import urlunsplit
+
+HAVE_KERBEROS = False
+try:
+ import kerberos
+ HAVE_KERBEROS = True
+except ImportError:
+ pass
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.errors import AnsibleFileNotFound
+from ansible.module_utils.json_utils import _filter_non_json_lines
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import binary_type
+from ansible.plugins.connection import ConnectionBase
+from ansible.plugins.shell.powershell import _parse_clixml
+from ansible.utils.hashing import secure_hash
+from ansible.utils.display import Display
+
+
+try:
+ import winrm
+ from winrm import Response
+ from winrm.protocol import Protocol
+ import requests.exceptions
+ HAS_WINRM = True
+ WINRM_IMPORT_ERR = None
+except ImportError as e:
+ HAS_WINRM = False
+ WINRM_IMPORT_ERR = e
+
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+ XMLTODICT_IMPORT_ERR = None
+except ImportError as e:
+ HAS_XMLTODICT = False
+ XMLTODICT_IMPORT_ERR = e
+
+HAS_PEXPECT = False
+try:
+ import pexpect
+ # echo was added in pexpect 3.3+ which is newer than the RHEL package
+ # we can only use pexpect for kerb auth if echo is a valid kwarg
+ # https://github.com/ansible/ansible/issues/43462
+ if hasattr(pexpect, 'spawn'):
+ argspec = getfullargspec(pexpect.spawn.__init__)
+ if 'echo' in argspec.args:
+ HAS_PEXPECT = True
+except ImportError as e:
+ pass
+
+# used to try and parse the hostname and detect if IPv6 is being used
+try:
+ import ipaddress
+ HAS_IPADDRESS = True
+except ImportError:
+ HAS_IPADDRESS = False
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ '''WinRM connections over HTTP/HTTPS.'''
+
+ transport = 'winrm'
+ module_implementation_preferences = ('.ps1', '.exe', '')
+ allow_executable = False
+ has_pipelining = True
+ allow_extras = True
+
+ def __init__(self, *args, **kwargs):
+
+ self.always_pipeline_modules = True
+ self.has_native_async = True
+
+ self.protocol = None
+ self.shell_id = None
+ self.delegate = None
+ self._shell_type = 'powershell'
+
+ super(Connection, self).__init__(*args, **kwargs)
+
+ if not C.DEFAULT_DEBUG:
+ logging.getLogger('requests_credssp').setLevel(logging.INFO)
+ logging.getLogger('requests_kerberos').setLevel(logging.INFO)
+ logging.getLogger('urllib3').setLevel(logging.INFO)
+
+ def _build_winrm_kwargs(self):
+        # This used to be in set_options; as win_reboot needs to be able to
+        # override the connection timeout, we need to be able to build the args
+        # after setting individual options. This is called by _connect before
+        # starting the WinRM connection
+ self._winrm_host = self.get_option('remote_addr')
+ self._winrm_user = self.get_option('remote_user')
+ self._winrm_pass = self.get_option('remote_password')
+
+ self._winrm_port = self.get_option('port')
+
+ self._winrm_scheme = self.get_option('scheme')
+        # Old behaviour: scheme should default to http if not set and the port
+        # is 5985, otherwise https
+ if self._winrm_scheme is None:
+ self._winrm_scheme = 'http' if self._winrm_port == 5985 else 'https'
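+            # e.g. port 5985 yields an http endpoint below; any other port
+            # defaults to https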
+
+ self._winrm_path = self.get_option('path')
+ self._kinit_cmd = self.get_option('kerberos_command')
+ self._winrm_transport = self.get_option('transport')
+ self._winrm_connection_timeout = self.get_option('connection_timeout')
+
+ if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
+ self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
+ else:
+ # for legacy versions of pywinrm, use the values we know are supported
+ self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos'])
+
+ # calculate transport if needed
+ if self._winrm_transport is None or self._winrm_transport[0] is None:
+ # TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
+ transport_selector = ['ssl'] if self._winrm_scheme == 'https' else ['plaintext']
+
+            if HAVE_KERBEROS and self._winrm_user and '@' in self._winrm_user:
+ self._winrm_transport = ['kerberos'] + transport_selector
+ else:
+ self._winrm_transport = transport_selector
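+            # e.g. scheme 'https' with user 'user@EXAMPLE.COM' and the kerberos
+            # library available resolves to ['kerberos', 'ssl']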
+
+ unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)
+
+ if unsupported_transports:
+ raise AnsibleError('The installed version of WinRM does not support transport(s) %s' %
+ to_native(list(unsupported_transports), nonstring='simplerepr'))
+
+ # if kerberos is among our transports and there's a password specified, we're managing the tickets
+ kinit_mode = self.get_option('kerberos_mode')
+ if kinit_mode is None:
+ # HACK: ideally, remove multi-transport stuff
+ self._kerb_managed = "kerberos" in self._winrm_transport and (self._winrm_pass is not None and self._winrm_pass != "")
+ elif kinit_mode == "managed":
+ self._kerb_managed = True
+ elif kinit_mode == "manual":
+ self._kerb_managed = False
+
+        # arg names we're going to pass directly
+ internal_kwarg_mask = {'self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'}
+
+ self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
+ argspec = getfullargspec(Protocol.__init__)
+ supported_winrm_args = set(argspec.args)
+ supported_winrm_args.update(internal_kwarg_mask)
+ passed_winrm_args = {v.replace('ansible_winrm_', '') for v in self.get_option('_extras')}
+ unsupported_args = passed_winrm_args.difference(supported_winrm_args)
+
+ # warn for kwargs unsupported by the installed version of pywinrm
+ for arg in unsupported_args:
+ display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))
+
+ # pass through matching extras, excluding the list we want to treat specially
+ for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
+ self._winrm_kwargs[arg] = self.get_option('_extras')['ansible_winrm_%s' % arg]
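+        # e.g. a var ansible_winrm_server_cert_validation=ignore is passed to
+        # pywinrm as server_cert_validation='ignore' (assuming the installed
+        # pywinrm accepts that keyword)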
+
+    # Until pykerberos has enough goodies to implement a rudimentary kinit/klist, the simplest way is to let each
+    # connection auth itself with a private CCACHE.
+ def _kerb_auth(self, principal, password):
+ if password is None:
+ password = ""
+
+ self._kerb_ccache = tempfile.NamedTemporaryFile()
+ display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
+ krb5ccname = "FILE:%s" % self._kerb_ccache.name
+ os.environ["KRB5CCNAME"] = krb5ccname
+ krb5env = dict(PATH=os.environ["PATH"], KRB5CCNAME=krb5ccname)
+
+ # Add any explicit environment vars into the krb5env block
+ kinit_env_vars = self.get_option('kinit_env_vars')
+ for var in kinit_env_vars:
+ if var not in krb5env and var in os.environ:
+ krb5env[var] = os.environ[var]
+
+ # Stores various flags to call with kinit, these could be explicit args set by 'ansible_winrm_kinit_args' OR
+ # '-f' if kerberos delegation is requested (ansible_winrm_kerberos_delegation).
+ kinit_cmdline = [self._kinit_cmd]
+ kinit_args = self.get_option('kinit_args')
+ if kinit_args:
+ kinit_args = [to_text(a) for a in shlex.split(kinit_args) if a.strip()]
+ kinit_cmdline.extend(kinit_args)
+
+ elif boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)):
+ kinit_cmdline.append('-f')
+
+ kinit_cmdline.append(principal)
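+        # e.g. with kerberos delegation requested the final command line is
+        # ['kinit', '-f', 'user@EXAMPLE.COM']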
+
+        # pexpect runs the process in its own pty so it can correctly send
+        # the password as input even on macOS, which blocks subprocess from
+        # doing so. Unfortunately it is not part of the Python standard
+        # library, so we can only use it if someone has installed it
+ if HAS_PEXPECT:
+ proc_mechanism = "pexpect"
+ command = kinit_cmdline.pop(0)
+ password = to_text(password, encoding='utf-8',
+ errors='surrogate_or_strict')
+
+ display.vvvv("calling kinit with pexpect for principal %s"
+ % principal)
+ try:
+ child = pexpect.spawn(command, kinit_cmdline, timeout=60,
+ env=krb5env, echo=False)
+ except pexpect.ExceptionPexpect as err:
+ err_msg = "Kerberos auth failure when calling kinit cmd " \
+ "'%s': %s" % (command, to_native(err))
+ raise AnsibleConnectionFailure(err_msg)
+
+ try:
+ child.expect(".*:")
+ child.sendline(password)
+ except OSError as err:
+                # the child exited before the password was sent; Ansible will raise
+                # an error based on the rc below, so just display the error here
+ display.vvvv("kinit with pexpect raised OSError: %s"
+ % to_native(err))
+
+ # technically this is the stdout + stderr but to match the
+ # subprocess error checking behaviour, we will call it stderr
+ stderr = child.read()
+ child.wait()
+ rc = child.exitstatus
+ else:
+ proc_mechanism = "subprocess"
+ password = to_bytes(password, encoding='utf-8',
+ errors='surrogate_or_strict')
+
+ display.vvvv("calling kinit with subprocess for principal %s"
+ % principal)
+ try:
+ p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=krb5env)
+
+ except OSError as err:
+ err_msg = "Kerberos auth failure when calling kinit cmd " \
+ "'%s': %s" % (self._kinit_cmd, to_native(err))
+ raise AnsibleConnectionFailure(err_msg)
+
+ stdout, stderr = p.communicate(password + b'\n')
+            rc = p.returncode
+
+ if rc != 0:
+ # one last attempt at making sure the password does not exist
+ # in the output
+ exp_msg = to_native(stderr.strip())
+ exp_msg = exp_msg.replace(to_native(password), "<redacted>")
+
+ err_msg = "Kerberos auth failure for principal %s with %s: %s" \
+ % (principal, proc_mechanism, exp_msg)
+ raise AnsibleConnectionFailure(err_msg)
+
+ display.vvvvv("kinit succeeded for principal %s" % principal)
+
+ def _winrm_connect(self):
+ '''
+ Establish a WinRM connection over HTTP/HTTPS.
+ '''
+ display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
+ (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
+
+ winrm_host = self._winrm_host
+ if HAS_IPADDRESS:
+ display.debug("checking if winrm_host %s is an IPv6 address" % winrm_host)
+ try:
+ ipaddress.IPv6Address(winrm_host)
+ except ipaddress.AddressValueError:
+ pass
+ else:
+ winrm_host = "[%s]" % winrm_host
+
+ netloc = '%s:%d' % (winrm_host, self._winrm_port)
+ endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
+ errors = []
+ for transport in self._winrm_transport:
+ if transport == 'kerberos':
+ if not HAVE_KERBEROS:
+ errors.append('kerberos: the python kerberos library is not installed')
+ continue
+ if self._kerb_managed:
+ self._kerb_auth(self._winrm_user, self._winrm_pass)
+ display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
+ try:
+ winrm_kwargs = self._winrm_kwargs.copy()
+ if self._winrm_connection_timeout:
+ winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout
+ winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 1
+ protocol = Protocol(endpoint, transport=transport, **winrm_kwargs)
+
+ # open the shell from connect so we know we're able to talk to the server
+ if not self.shell_id:
+ self.shell_id = protocol.open_shell(codepage=65001) # UTF-8
+ display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
+
+ return protocol
+ except Exception as e:
+ err_msg = to_text(e).strip()
+ if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
+ raise AnsibleError('the connection attempt timed out')
+ m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
+ if m:
+ code = int(m.groups()[0])
+ if code == 401:
+ err_msg = 'the specified credentials were rejected by the server'
+ elif code == 411:
+ return protocol
+ errors.append(u'%s: %s' % (transport, err_msg))
+ display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
+ if errors:
+ raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
+ else:
+ raise AnsibleError('No transport found for WinRM connection')
+
+ def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False):
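+        # Build a WS-Man 'Send' SOAP envelope that delivers one base64-encoded
+        # chunk of stdin to the running command; @End='true' signals EOF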
+ rq = {'env:Envelope': protocol._get_soap_header(
+ resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
+ action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
+ shell_id=shell_id)}
+ stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
+ .setdefault('rsp:Stream', {})
+ stream['@Name'] = 'stdin'
+ stream['@CommandId'] = command_id
+ stream['#text'] = base64.b64encode(to_bytes(stdin))
+ if eof:
+ stream['@End'] = 'true'
+ protocol.send_message(xmltodict.unparse(rq))
+
+ def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
+ if not self.protocol:
+ self.protocol = self._winrm_connect()
+ self._connected = True
+ if from_exec:
+ display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
+ else:
+ display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
+ command_id = None
+ try:
+ stdin_push_failed = False
+ command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None))
+
+ try:
+ if stdin_iterator:
+ for (data, is_last) in stdin_iterator:
+ self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
+
+ except Exception as ex:
+ display.warning("ERROR DURING WINRM SEND INPUT - attempting to recover: %s %s"
+ % (type(ex).__name__, to_text(ex)))
+ display.debug(traceback.format_exc())
+ stdin_push_failed = True
+
+ # NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
+ # FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
+ resptuple = self.protocol.get_command_output(self.shell_id, command_id)
+ # ensure stdout/stderr are text for py3
+ # FUTURE: this should probably be done internally by pywinrm
+ response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple))
+
+ # TODO: check result from response and set stdin_push_failed if we have nonzero
+ if from_exec:
+ display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
+ else:
+ display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
+
+ display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
+ display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)
+
+ if stdin_push_failed:
+ # There are cases where the stdin input failed but the WinRM service still processed it. We attempt to
+ # see if stdout contains a valid json return value so we can ignore this error
+ try:
+ filtered_output, dummy = _filter_non_json_lines(response.std_out)
+ json.loads(filtered_output)
+ except ValueError:
+ # stdout does not contain a return response, stdin input was a fatal error
+ stderr = to_bytes(response.std_err, encoding='utf-8')
+ if stderr.startswith(b"#< CLIXML"):
+ stderr = _parse_clixml(stderr)
+
+                    raise AnsibleError('winrm send_input failed;\nstdout: %s\nstderr: %s'
+ % (to_native(response.std_out), to_native(stderr)))
+
+ return response
+ except requests.exceptions.Timeout as exc:
+ raise AnsibleConnectionFailure('winrm connection error: %s' % to_native(exc))
+ finally:
+ if command_id:
+ self.protocol.cleanup_command(self.shell_id, command_id)
+
+ def _connect(self):
+
+ if not HAS_WINRM:
+ raise AnsibleError("winrm or requests is not installed: %s" % to_native(WINRM_IMPORT_ERR))
+ elif not HAS_XMLTODICT:
+ raise AnsibleError("xmltodict is not installed: %s" % to_native(XMLTODICT_IMPORT_ERR))
+
+ super(Connection, self)._connect()
+ if not self.protocol:
+ self._build_winrm_kwargs() # build the kwargs from the options set
+ self.protocol = self._winrm_connect()
+ self._connected = True
+ return self
+
+ def reset(self):
+ if not self._connected:
+ return
+ self.protocol = None
+ self.shell_id = None
+ self._connect()
+
+ def _wrapper_payload_stream(self, payload, buffer_size=200000):
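+        # Chunk the payload for streaming over WinRM stdin; e.g. a 500000-byte
+        # payload yields (bytes 0-199999, False), (bytes 200000-399999, False)
+        # and (bytes 400000-499999, True)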
+ payload_bytes = to_bytes(payload)
+ byte_count = len(payload_bytes)
+ for i in range(0, byte_count, buffer_size):
+ yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+ cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
+
+ # TODO: display something meaningful here
+ display.vvv("EXEC (via pipeline wrapper)")
+
+ stdin_iterator = None
+
+ if in_data:
+ stdin_iterator = self._wrapper_payload_stream(in_data)
+
+ result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)
+
+ result.std_out = to_bytes(result.std_out)
+ result.std_err = to_bytes(result.std_err)
+
+ # parse just stderr from CLIXML output
+ if result.std_err.startswith(b"#< CLIXML"):
+ try:
+ result.std_err = _parse_clixml(result.std_err)
+ except Exception:
+                # unsure if we're guaranteed a valid xml doc; use raw output in case of error
+ pass
+
+ return (result.status_code, result.std_out, result.std_err)
+
+ # FUTURE: determine buffer size at runtime via remote winrm config?
+ def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000):
+ in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
+ offset = 0
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ for out_data in iter((lambda: in_file.read(buffer_size)), b''):
+ offset += len(out_data)
+ self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
+ # yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
+ b64_data = base64.b64encode(out_data) + b'\r\n'
+ # cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal
+ yield b64_data, (in_file.tell() == in_size)
+
+ if offset == 0: # empty file, return an empty buffer + eof to close it
+ yield "", True
+
+ def put_file(self, in_path, out_path):
+ super(Connection, self).put_file(in_path, out_path)
+ out_path = self._shell._unquote(out_path)
+ display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
+
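+        # The PowerShell template below reads base64 lines from stdin, writes the
+        # decoded bytes to the target path, and prints a JSON object with the
+        # file's SHA-1 so the transfer can be verified afterwards.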
+ script_template = u'''
+ begin {{
+ $path = '{0}'
+
+ $DebugPreference = "Continue"
+ $ErrorActionPreference = "Stop"
+ Set-StrictMode -Version 2
+
+ $fd = [System.IO.File]::Create($path)
+
+ $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
+
+ $bytes = @() #initialize for empty file case
+ }}
+ process {{
+ $bytes = [System.Convert]::FromBase64String($input)
+ $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
+ $fd.Write($bytes, 0, $bytes.Length)
+ }}
+ end {{
+ $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null
+
+ $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
+
+ $fd.Close()
+
+ Write-Output "{{""sha1"":""$hash""}}"
+ }}
+ '''
+
+ script = script_template.format(self._shell._escape(out_path))
+ cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)
+
+ result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
+ # TODO: improve error handling
+ if result.status_code != 0:
+ raise AnsibleError(to_native(result.std_err))
+
+ try:
+ put_output = json.loads(result.std_out)
+ except ValueError:
+ # stdout does not contain a valid response
+ stderr = to_bytes(result.std_err, encoding='utf-8')
+ if stderr.startswith(b"#< CLIXML"):
+ stderr = _parse_clixml(stderr)
+            raise AnsibleError('winrm put_file failed;\nstdout: %s\nstderr: %s' % (to_native(result.std_out), to_native(stderr)))
+
+ remote_sha1 = put_output.get("sha1")
+ if not remote_sha1:
+ raise AnsibleError("Remote sha1 was not returned")
+
+ local_sha1 = secure_hash(in_path)
+
+        if remote_sha1 != local_sha1:
+ raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))
+
+ def fetch_file(self, in_path, out_path):
+ super(Connection, self).fetch_file(in_path, out_path)
+ in_path = self._shell._unquote(in_path)
+ out_path = out_path.replace('\\', '/')
+ # consistent with other connection plugins, we assume the caller has created the target dir
+ display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
+ buffer_size = 2**19 # 0.5MB chunks
+ out_file = None
+ try:
+ offset = 0
+ while True:
+ try:
+ script = '''
+ $path = '%(path)s'
+ If (Test-Path -Path $path -PathType Leaf)
+ {
+ $buffer_size = %(buffer_size)d
+ $offset = %(offset)d
+
+ $stream = New-Object -TypeName IO.FileStream($path, [IO.FileMode]::Open, [IO.FileAccess]::Read, [IO.FileShare]::ReadWrite)
+ $stream.Seek($offset, [System.IO.SeekOrigin]::Begin) > $null
+ $buffer = New-Object -TypeName byte[] $buffer_size
+ $bytes_read = $stream.Read($buffer, 0, $buffer_size)
+ if ($bytes_read -gt 0) {
+ $bytes = $buffer[0..($bytes_read - 1)]
+ [System.Convert]::ToBase64String($bytes)
+ }
+ $stream.Close() > $null
+ }
+ ElseIf (Test-Path -Path $path -PathType Container)
+ {
+ Write-Host "[DIR]";
+ }
+ Else
+ {
+ Write-Error "$path does not exist";
+ Exit 1;
+ }
+ ''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
+ display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
+ cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
+ result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
+ if result.status_code != 0:
+ raise IOError(to_native(result.std_err))
+ if result.std_out.strip() == '[DIR]':
+ data = None
+ else:
+ data = base64.b64decode(result.std_out.strip())
+ if data is None:
+ break
+ else:
+ if not out_file:
+ # If out_path is a directory and we're expecting a file, bail out now.
+ if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
+ break
+ out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
+ out_file.write(data)
+ if len(data) < buffer_size:
+ break
+ offset += len(data)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError('failed to transfer file to "%s"' % to_native(out_path))
+ finally:
+ if out_file:
+ out_file.close()
+
+ def close(self):
+ if self.protocol and self.shell_id:
+ display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
+ self.protocol.close_shell(self.shell_id)
+ self.shell_id = None
+ self.protocol = None
+ self._connected = False
diff --git a/lib/ansible/plugins/doc_fragments/__init__.py b/lib/ansible/plugins/doc_fragments/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/__init__.py
diff --git a/lib/ansible/plugins/doc_fragments/action_common_attributes.py b/lib/ansible/plugins/doc_fragments/action_common_attributes.py
new file mode 100644
index 0000000..c135df5
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/action_common_attributes.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+attributes:
+ check_mode:
+ description: Can run in check_mode and return changed status prediction without modifying target
+ diff_mode:
+ description: Will return details on what has changed (or possibly needs changing in check_mode), when in diff mode
+ platform:
+ description: Target OS/families that can be operated against
+ support: N/A
+'''
+
+ ACTIONGROUPS = r'''
+attributes:
+ action_group:
+ description: Action is part of action_group(s), for convenient setting of module_defaults.
+ support: N/A
+ membership: []
+'''
+
+ CONN = r'''
+attributes:
+ become:
+ description: Is usable alongside become keywords
+ connection:
+ description: Uses the target's configured connection information to execute code on it
+ delegation:
+ description: Can be used in conjunction with delegate_to and related keywords
+'''
+
+ FACTS = r'''
+attributes:
+ facts:
+ description: Action returns an C(ansible_facts) dictionary that will update existing host facts
+'''
+
+ FILES = r'''
+attributes:
+ safe_file_operations:
+ description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption
+ vault:
+ description: Can automatically decrypt Ansible vaulted files
+'''
+
+ FLOW = r'''
+attributes:
+ action:
+ description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller
+ async:
+ description: Supports being used with the C(async) keyword
+ bypass_host_loop:
+ description:
+      - Forces a 'global' task that does not execute per host; this bypasses per-host templating and serial,
+        throttle and other loop considerations
+ - Conditionals will work as if C(run_once) is being used, variables used will be from the first available host
+ - This action will not work normally outside of lockstep strategies
+'''
+ RAW = r'''
+attributes:
+ raw:
+      description: Indicates if an action takes a 'raw' or 'free form' string as an option and has its own special parsing of it
+'''
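+
+# Plugins and modules pull these fragments in via, for example:
+#   extends_documentation_fragment:
+#     - action_common_attributes
+#     - action_common_attributes.flow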
diff --git a/lib/ansible/plugins/doc_fragments/action_core.py b/lib/ansible/plugins/doc_fragments/action_core.py
new file mode 100644
index 0000000..931ca14
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/action_core.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) , Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+# WARNING: this is mostly here as a convenience for documenting core behaviours; no plugin outside of ansible-core should use this file
+class ModuleDocFragment(object):
+
+ # requires action_common
+ DOCUMENTATION = r'''
+attributes:
+ async:
+ support: none
+ become:
+ support: none
+ bypass_task_loop:
+ description: These tasks ignore the C(loop) and C(with_) keywords
+ core:
+ description: This is a 'core engine' feature and is not implemented like most task actions, so it is not overridable in any way via the plugin system.
+ support: full
+ connection:
+ support: none
+ ignore_conditional:
+ support: none
+ description: The action is not subject to conditional execution so it will ignore the C(when:) keyword
+ platform:
+ support: full
+ platforms: all
+ until:
+      description: Denotes if this action obeys until/retry/poll keywords
+ support: full
+ tags:
+ description: Allows for the 'tags' keyword to control the selection of this action for execution
+ support: full
+'''
+
+ # also requires core above
+ IMPORT = r'''
+attributes:
+ action:
+ details: While this action executes locally on the controller it is not governed by an action plugin
+ support: none
+ bypass_host_loop:
+      details: While the import can be host specific and runs per host, it does not deal with all available host variables;
+               use an include instead for those cases
+ support: partial
+ bypass_task_loop:
+ details: The task itself is not looped, but the loop is applied to each imported task
+ support: partial
+ delegation:
+      details: Since no connection or facts are involved, there is no sense in delegating imports
+ support: none
+ ignore_conditional:
+ details: While the action itself will ignore the conditional, it will be inherited by the imported tasks themselves
+ support: partial
+ tags:
+ details: Tags are not interpreted for this action, they are applied to the imported tasks
+ support: none
+ until:
+ support: none
+'''
+ # also requires core above
+ INCLUDE = r'''
+attributes:
+ action:
+ details: While this action executes locally on the controller it is not governed by an action plugin
+ support: none
+ bypass_host_loop:
+ support: none
+ bypass_task_loop:
+ support: none
+ delegation:
+      details: Since no connection or facts are involved, there is no sense in delegating includes
+ support: none
+ tags:
+ details: Tags are interpreted by this action but are not automatically inherited by the include tasks, see C(apply)
+ support: partial
+'''
diff --git a/lib/ansible/plugins/doc_fragments/backup.py b/lib/ansible/plugins/doc_fragments/backup.py
new file mode 100644
index 0000000..d2e76dc
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/backup.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Ansible, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+'''
diff --git a/lib/ansible/plugins/doc_fragments/connection_pipelining.py b/lib/ansible/plugins/doc_fragments/connection_pipelining.py
new file mode 100644
index 0000000..fa18265
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/connection_pipelining.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2021 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+    # common connection pipelining documentation fragment
+ DOCUMENTATION = """
+options:
+ pipelining:
+ default: false
+ description:
+ - Pipelining reduces the number of connection operations required to execute a module on the remote server,
+ by executing many Ansible modules without actual file transfers.
+ - This can result in a very significant performance improvement when enabled.
+ - However this can conflict with privilege escalation (become).
+ For example, when using sudo operations you must first disable 'requiretty' in the sudoers file for the target hosts,
+ which is why this feature is disabled by default.
+ env:
+ - name: ANSIBLE_PIPELINING
+ ini:
+ - section: defaults
+ key: pipelining
+ - section: connection
+ key: pipelining
+ type: boolean
+ vars:
+ - name: ansible_pipelining
+"""
diff --git a/lib/ansible/plugins/doc_fragments/constructed.py b/lib/ansible/plugins/doc_fragments/constructed.py
new file mode 100644
index 0000000..7810acb
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/constructed.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ strict:
+ description:
+            - If C(yes), make invalid entries a fatal error; otherwise skip and continue.
+            - Since it is possible to use facts in the expressions, they might not always be available,
+              and we ignore those errors by default.
+ type: bool
+ default: no
+ compose:
+ description: Create vars from jinja2 expressions.
+ type: dict
+ default: {}
+ groups:
+ description: Add hosts to group based on Jinja2 conditionals.
+ type: dict
+ default: {}
+ keyed_groups:
+ description: Add hosts to group based on the values of a variable.
+ type: list
+ default: []
+ elements: dict
+ suboptions:
+ parent_group:
+ type: str
+ description: parent group for keyed group
+ prefix:
+ type: str
+ description: A keyed group name will start with this prefix
+ default: ''
+ separator:
+ type: str
+ description: separator used to build the keyed group name
+ default: "_"
+ key:
+ type: str
+ description:
+ - The key from input dictionary used to generate groups
+ default_value:
+ description:
+ - The default value when the host variable's value is an empty string.
+ - This option is mutually exclusive with C(trailing_separator).
+ type: str
+ version_added: '2.12'
+ trailing_separator:
+ description:
+ - Set this option to I(False) to omit the C(separator) after the host variable when the value is an empty string.
+ - This option is mutually exclusive with C(default_value).
+ type: bool
+ default: True
+ version_added: '2.12'
+ use_extra_vars:
+ version_added: '2.11'
+ description: Merge extra vars into the available variables for composition (highest precedence).
+ type: bool
+ default: False
+ ini:
+ - section: inventory_plugins
+ key: use_extra_vars
+ env:
+ - name: ANSIBLE_INVENTORY_USE_EXTRA_VARS
+ leading_separator:
+ description:
+ - Use in conjunction with keyed_groups.
+ - By default, a keyed group that does not have a prefix or a separator provided will have a name that starts with an underscore.
+ - This is because the default prefix is "" and the default separator is "_".
+ - Set this option to False to omit the leading underscore (or other separator) if no prefix is given.
+ - If the group name is derived from a mapping the separator is still used to concatenate the items.
+ - To not use a separator in the group name at all, set the separator for the keyed group to an empty string instead.
+ type: boolean
+ default: True
+ version_added: '2.11'
+'''
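+
+# Illustrative example: a keyed group with prefix 'os', the default '_'
+# separator and key 'ansible_distribution' places a host reporting
+# ansible_distribution=Ubuntu into the group 'os_Ubuntu'.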
diff --git a/lib/ansible/plugins/doc_fragments/decrypt.py b/lib/ansible/plugins/doc_fragments/decrypt.py
new file mode 100644
index 0000000..ea7cf59
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/decrypt.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Brian Coca <bcoca@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r'''
+options:
+ decrypt:
+ description:
+ - This option controls the autodecryption of source files using vault.
+ type: bool
+ default: yes
+ version_added: '2.4'
+'''
diff --git a/lib/ansible/plugins/doc_fragments/default_callback.py b/lib/ansible/plugins/doc_fragments/default_callback.py
new file mode 100644
index 0000000..5798334
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/default_callback.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+ options:
+ display_skipped_hosts:
+ name: Show skipped hosts
+ description: "Toggle to control displaying skipped task/host results in a task"
+ type: bool
+ default: yes
+ env:
+ - name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
+ ini:
+ - key: display_skipped_hosts
+ section: defaults
+ display_ok_hosts:
+ name: Show 'ok' hosts
+ description: "Toggle to control displaying 'ok' task/host results in a task"
+ type: bool
+ default: yes
+ env:
+ - name: ANSIBLE_DISPLAY_OK_HOSTS
+ ini:
+ - key: display_ok_hosts
+ section: defaults
+ version_added: '2.7'
+ display_failed_stderr:
+ name: Use STDERR for failed and unreachable tasks
+ description: "Toggle to control whether failed and unreachable tasks are displayed to STDERR (vs. STDOUT)"
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_DISPLAY_FAILED_STDERR
+ ini:
+ - key: display_failed_stderr
+ section: defaults
+ version_added: '2.7'
+ show_custom_stats:
+ name: Show custom stats
+ description: 'This adds the custom stats set via the set_stats plugin to the play recap'
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_SHOW_CUSTOM_STATS
+ ini:
+ - key: show_custom_stats
+ section: defaults
+ show_per_host_start:
+ name: Show per host task start
+ description: 'This adds output that shows when a task is started to execute for each host'
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_SHOW_PER_HOST_START
+ ini:
+ - key: show_per_host_start
+ section: defaults
+ version_added: '2.9'
+ check_mode_markers:
+ name: Show markers when running in check mode
+ description:
+ - Toggle to control displaying markers when running in check mode.
+ - "The markers are C(DRY RUN) at the beginning and ending of playbook execution (when calling C(ansible-playbook --check))
+ and C(CHECK MODE) as a suffix at every play and task that is run in check mode."
+ type: bool
+ default: no
+ version_added: '2.9'
+ env:
+ - name: ANSIBLE_CHECK_MODE_MARKERS
+ ini:
+ - key: check_mode_markers
+ section: defaults
+ show_task_path_on_failure:
+ name: Show file path on failed tasks
+ description:
+ When a task fails, display the path to the file containing the failed task and the line number.
+ This information is displayed automatically for every task when running with C(-vv) or greater verbosity.
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_SHOW_TASK_PATH_ON_FAILURE
+ ini:
+ - key: show_task_path_on_failure
+ section: defaults
+ version_added: '2.11'
+'''
diff --git a/lib/ansible/plugins/doc_fragments/files.py b/lib/ansible/plugins/doc_fragments/files.py
new file mode 100644
index 0000000..b87fd11
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/files.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+
+ # Note: mode is overridden by the copy and template modules so if you change the description
+ # here, you should also change it there.
+ DOCUMENTATION = r'''
+options:
+ mode:
+ description:
+ - The permissions the resulting filesystem object should have.
+ - For those used to I(/usr/bin/chmod) remember that modes are actually octal numbers.
+ You must either add a leading zero so that Ansible's YAML parser knows it is an octal number
+ (like C(0644) or C(01777)) or quote it (like C('644') or C('1777')) so Ansible receives
+ a string and can do its own conversion from string into number.
+ - Giving Ansible a number without following one of these rules will end up with a decimal
+ number which will have unexpected results.
+ - As of Ansible 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or
+ C(u=rw,g=r,o=r)).
+ - If C(mode) is not specified and the destination filesystem object B(does not) exist, the default C(umask) on the system will be used
+ when setting the mode for the newly created filesystem object.
+ - If C(mode) is not specified and the destination filesystem object B(does) exist, the mode of the existing filesystem object will be used.
+ - Specifying C(mode) is the best way to ensure filesystem objects are created with the correct permissions.
+ See CVE-2020-1736 for further details.
+ type: raw
+ owner:
+ description:
+ - Name of the user that should own the filesystem object, as would be fed to I(chown).
+ - When left unspecified, it uses the current user unless you are root, in which
+ case it can preserve the previous ownership.
+    - A numeric username will be assumed to be a user ID and not a username. Avoid numeric usernames to avoid this confusion.
+ type: str
+ group:
+ description:
+ - Name of the group that should own the filesystem object, as would be fed to I(chown).
+ - When left unspecified, it uses the current group of the current user unless you are root,
+ in which case it can preserve the previous ownership.
+ type: str
+ seuser:
+ description:
+ - The user part of the SELinux filesystem object context.
+ - By default it uses the C(system) policy, where applicable.
+ - When set to C(_default), it will use the C(user) portion of the policy if available.
+ type: str
+ serole:
+ description:
+ - The role part of the SELinux filesystem object context.
+ - When set to C(_default), it will use the C(role) portion of the policy if available.
+ type: str
+ setype:
+ description:
+ - The type part of the SELinux filesystem object context.
+ - When set to C(_default), it will use the C(type) portion of the policy if available.
+ type: str
+ selevel:
+ description:
+ - The level part of the SELinux filesystem object context.
+ - This is the MLS/MCS attribute, sometimes known as the C(range).
+ - When set to C(_default), it will use the C(level) portion of the policy if available.
+ type: str
+ unsafe_writes:
+ description:
+ - Influence when to use atomic operation to prevent data corruption or inconsistent reads from the target filesystem object.
+ - By default this module uses atomic operations to prevent data corruption or inconsistent reads from the target filesystem objects,
+ but sometimes systems are configured or just broken in ways that prevent this. One example is docker mounted filesystem objects,
+ which cannot be updated atomically from inside the container and can only be written in an unsafe manner.
+ - This option allows Ansible to fall back to unsafe methods of updating filesystem objects when atomic operations fail
+ (however, it doesn't force Ansible to perform unsafe writes).
+ - IMPORTANT! Unsafe writes are subject to race conditions and can lead to data corruption.
+ type: bool
+ default: no
+ version_added: '2.2'
+ attributes:
+ description:
+ - The attributes the resulting filesystem object should have.
+ - To get supported flags look at the man page for I(chattr) on the target system.
+ - This string should contain the attributes in the same order as the one displayed by I(lsattr).
+ - The C(=) operator is assumed as default, otherwise C(+) or C(-) operators need to be included in the string.
+ type: str
+ aliases: [ attr ]
+ version_added: '2.3'
+'''
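+
+# Illustrative task usage: mode: '0644' (quoted) or mode: u=rw,g=r,o=r are
+# safe; an unquoted mode: 644 is read as decimal 644 and sets unexpected
+# permission bits.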
diff --git a/lib/ansible/plugins/doc_fragments/inventory_cache.py b/lib/ansible/plugins/doc_fragments/inventory_cache.py
new file mode 100644
index 0000000..9326c3f
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/inventory_cache.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # inventory cache
+ DOCUMENTATION = r'''
+options:
+ cache:
+ description:
+ - Toggle to enable/disable the caching of the inventory's source data, requires a cache plugin setup to work.
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_INVENTORY_CACHE
+ ini:
+ - section: inventory
+ key: cache
+ cache_plugin:
+ description:
+ - Cache plugin to use for the inventory's source data.
+ type: str
+ default: memory
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN
+ - name: ANSIBLE_INVENTORY_CACHE_PLUGIN
+ ini:
+ - section: defaults
+ key: fact_caching
+ - section: inventory
+ key: cache_plugin
+ cache_timeout:
+ description:
+ - Cache duration in seconds
+ default: 3600
+ type: int
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ - name: ANSIBLE_INVENTORY_CACHE_TIMEOUT
+ ini:
+ - section: defaults
+ key: fact_caching_timeout
+ - section: inventory
+ key: cache_timeout
+ cache_connection:
+ description:
+ - Cache connection data or path, read cache plugin documentation for specifics.
+ type: str
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ - name: ANSIBLE_INVENTORY_CACHE_CONNECTION
+ ini:
+ - section: defaults
+ key: fact_caching_connection
+ - section: inventory
+ key: cache_connection
+ cache_prefix:
+ description:
+ - Prefix to use for cache plugin files/tables
+ default: ansible_inventory_
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ - name: ANSIBLE_INVENTORY_CACHE_PLUGIN_PREFIX
+ ini:
+ - section: default
+ key: fact_caching_prefix
+ deprecated:
+ alternatives: Use the 'defaults' section instead
+ why: Fixes typing error in INI section name
+ version: '2.16'
+ - section: defaults
+ key: fact_caching_prefix
+ - section: inventory
+ key: cache_prefix
+'''
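+
+# A typical setup using the options above (values are examples only):
+#   ANSIBLE_INVENTORY_CACHE=1
+#   ANSIBLE_CACHE_PLUGIN=jsonfile
+#   ANSIBLE_CACHE_PLUGIN_CONNECTION=/tmp/ansible_inventory_cache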
diff --git a/lib/ansible/plugins/doc_fragments/result_format_callback.py b/lib/ansible/plugins/doc_fragments/result_format_callback.py
new file mode 100644
index 0000000..1b71173
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/result_format_callback.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+ options:
+ result_format:
+ name: Format of the task result
+ description:
+ - Define the task result format used in the callback output.
+ - These formats do not cause the callback to emit valid JSON or YAML.
+ - The output contains these formats interspersed with other non-machine-parsable data.
+ type: str
+ default: json
+ env:
+ - name: ANSIBLE_CALLBACK_RESULT_FORMAT
+ ini:
+ - key: callback_result_format
+ section: defaults
+ choices:
+ - json
+ - yaml
+ version_added: '2.13'
+ pretty_results:
+ name: Configure output for readability
+ description:
+ - Configure the result format to be more readable.
+ - When the result format is set to C(yaml) this option defaults to C(True), and defaults
+ to C(False) when configured to C(json).
+ - Setting this option to C(True) will force C(json) and C(yaml) results to always be pretty
+ printed regardless of verbosity.
+ - When set to C(True) and used with the C(yaml) result format, this option will
+ modify module responses in an attempt to produce a more human friendly output at the expense
+ of correctness, and should not be relied upon to aid in writing variable manipulations
+ or conditionals. For correctness, set this option to C(False) or set the result format to C(json).
+ type: bool
+ default: null
+ env:
+ - name: ANSIBLE_CALLBACK_FORMAT_PRETTY
+ ini:
+ - key: callback_format_pretty
+ section: defaults
+ version_added: '2.13'
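+# Illustrative only: an ansible.cfg snippet using the INI keys documented above:
+#   [defaults]
+#   callback_result_format = yaml
+#   callback_format_pretty = True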
+'''
diff --git a/lib/ansible/plugins/doc_fragments/return_common.py b/lib/ansible/plugins/doc_fragments/return_common.py
new file mode 100644
index 0000000..6f54288
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/return_common.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard documentation fragment
+ RETURN = r'''
+changed:
+ description: Whether the module affected changes on the target.
+ returned: always
+ type: bool
+ sample: false
+failed:
+ description: Whether the module failed to execute.
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: Human-readable message.
+ returned: as needed
+ type: str
+ sample: all ok
+skipped:
+ description: Whether the module was skipped.
+ returned: always
+ type: bool
+ sample: false
+results:
+ description: List of module results.
+ returned: when using a loop.
+ type: list
+ sample: [{changed: True, msg: 'first item changed'}, {changed: False, msg: 'second item ok'}]
+exception:
+ description: Optional information from a handled error.
+ returned: on some errors
+ type: str
+ sample: Unknown error
+'''
diff --git a/lib/ansible/plugins/doc_fragments/shell_common.py b/lib/ansible/plugins/doc_fragments/shell_common.py
new file mode 100644
index 0000000..fe1ae4e
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/shell_common.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # common shell documentation fragment
+ DOCUMENTATION = """
+options:
+ remote_tmp:
+ description:
+ - Temporary directory to use on targets when executing tasks.
+ default: '~/.ansible/tmp'
+ env: [{name: ANSIBLE_REMOTE_TEMP}, {name: ANSIBLE_REMOTE_TMP}]
+ ini:
+ - section: defaults
+ key: remote_tmp
+ vars:
+ - name: ansible_remote_tmp
+ common_remote_group:
+ name: Enables changing the group ownership of temporary files and directories
+ default: null
+ description:
+ - Checked when Ansible needs to execute a module as a different user.
+ - If setfacl and chown both fail and do not let the different user access the module's files, they will be chgrp'd to this group.
+ - In order for this to work, the remote_user and become_user must share a common group and this setting must be set to that group.
+ env: [{name: ANSIBLE_COMMON_REMOTE_GROUP}]
+ vars:
+ - name: ansible_common_remote_group
+ ini:
+ - {key: common_remote_group, section: defaults}
+ version_added: "2.10"
+ system_tmpdirs:
+ description:
+ - "List of valid system temporary directories on the managed machine for Ansible to validate
+ C(remote_tmp) against, when specific permissions are needed. These must be world
+ readable, writable, and executable. This list should only contain directories which the
+ system administrator has pre-created with the proper ownership and permissions otherwise
+ security issues can arise."
+ - When C(remote_tmp) is required to be a system temp dir and it does not match any in the list,
+ the first one from the list will be used instead.
+ default: [ /var/tmp, /tmp ]
+ type: list
+ elements: string
+ env: [{name: ANSIBLE_SYSTEM_TMPDIRS}]
+ ini:
+ - section: defaults
+ key: system_tmpdirs
+ vars:
+ - name: ansible_system_tmpdirs
+ async_dir:
+ description:
+ - Directory in which Ansible will keep async job information.
+ default: '~/.ansible_async'
+ env: [{name: ANSIBLE_ASYNC_DIR}]
+ ini:
+ - section: defaults
+ key: async_dir
+ vars:
+ - name: ansible_async_dir
+ environment:
+ type: list
+ elements: dictionary
+ default: [{}]
+ description:
+ - List of dictionaries of environment variables and their values to use when executing commands.
+ keyword:
+ - name: environment
+ admin_users:
+ type: list
+ elements: string
+ default: ['root', 'toor']
+ description:
+ - List of users expected to have admin privileges. This is used by the controller to
+ determine how to share temporary files between the remote user and the become user.
+ env:
+ - name: ANSIBLE_ADMIN_USERS
+ ini:
+ - section: defaults
+ key: admin_users
+ vars:
+ - name: ansible_admin_users
+ world_readable_temp:
+ version_added: '2.10'
+ default: False
+ description:
+ - This makes the temporary files created on the machine world-readable and will issue a warning instead of failing the task.
+ - It is useful when becoming an unprivileged user.
+ env:
+ - name: ANSIBLE_SHELL_ALLOW_WORLD_READABLE_TEMP
+ vars:
+ - name: ansible_shell_allow_world_readable_temp
+ ini:
+ - {key: allow_world_readable_tmpfiles, section: defaults}
+ type: boolean
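+# Illustrative only: these options map to ansible.cfg keys documented above, for example:
+#   [defaults]
+#   remote_tmp = /opt/ansible/tmp
+#   system_tmpdirs = /var/tmp, /tmp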
+"""
diff --git a/lib/ansible/plugins/doc_fragments/shell_windows.py b/lib/ansible/plugins/doc_fragments/shell_windows.py
new file mode 100644
index 0000000..ac52c60
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/shell_windows.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Windows shell documentation fragment
+ # FIXME: set_module_language doesn't belong here but must be set so it doesn't fail when someone
+ # calls get_option('set_module_language') on this plugin
+ DOCUMENTATION = r"""
+options:
+ async_dir:
+ description:
+ - Directory in which Ansible will keep async job information.
+ - Before Ansible 2.8, this was set to C(remote_tmp + "\.ansible_async").
+ default: '%USERPROFILE%\.ansible_async'
+ ini:
+ - section: powershell
+ key: async_dir
+ vars:
+ - name: ansible_async_dir
+ version_added: '2.8'
+ remote_tmp:
+ description:
+ - Temporary directory to use on targets when copying files to the host.
+ default: '%TEMP%'
+ ini:
+ - section: powershell
+ key: remote_tmp
+ vars:
+ - name: ansible_remote_tmp
+ set_module_language:
+ description:
+ - Controls if we set the locale for modules when executing on the
+ target.
+ - Windows only supports C(no) as an option.
+ type: bool
+ default: 'no'
+ choices: ['no', False]
+ environment:
+ description:
+ - List of dictionaries of environment variables and their values to use when
+ executing commands.
+ keyword:
+ - name: environment
+ type: list
+ elements: dictionary
+ default: [{}]
+"""
diff --git a/lib/ansible/plugins/doc_fragments/template_common.py b/lib/ansible/plugins/doc_fragments/template_common.py
new file mode 100644
index 0000000..6276e84
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/template_common.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard template documentation fragment, used by the template and win_template modules.
+ DOCUMENTATION = r'''
+description:
+- Templates are processed by the L(Jinja2 templating language,http://jinja.pocoo.org/docs/).
+- Documentation on the template formatting can be found in the
+ L(Template Designer Documentation,http://jinja.pocoo.org/docs/templates/).
+- Additional variables listed below can be used in templates.
+- C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to
+ describe the template name, host, modification time of the template file and the owner uid.
+- C(template_host) contains the node name of the template's machine.
+- C(template_uid) is the numeric user id of the owner.
+- C(template_path) is the path of the template.
+- C(template_fullpath) is the absolute path of the template.
+- C(template_destpath) is the path of the template on the remote system (added in 2.8).
+- C(template_run_date) is the date that the template was rendered.
+options:
+ src:
+ description:
+ - Path of a Jinja2 formatted template on the Ansible controller.
+ - This can be a relative or an absolute path.
+ - The file must be encoded with C(utf-8) but I(output_encoding) can be used to control the encoding of the output
+ template.
+ type: path
+ required: yes
+ dest:
+ description:
+ - Location to render the template to on the remote machine.
+ type: path
+ required: yes
+ newline_sequence:
+ description:
+ - Specify the newline sequence to use for templating files.
+ type: str
+ choices: [ '\n', '\r', '\r\n' ]
+ default: '\n'
+ version_added: '2.4'
+ block_start_string:
+ description:
+ - The string marking the beginning of a block.
+ type: str
+ default: '{%'
+ version_added: '2.4'
+ block_end_string:
+ description:
+ - The string marking the end of a block.
+ type: str
+ default: '%}'
+ version_added: '2.4'
+ variable_start_string:
+ description:
+ - The string marking the beginning of a print statement.
+ type: str
+ default: '{{'
+ version_added: '2.4'
+ variable_end_string:
+ description:
+ - The string marking the end of a print statement.
+ type: str
+ default: '}}'
+ version_added: '2.4'
+ comment_start_string:
+ description:
+ - The string marking the beginning of a comment statement.
+ type: str
+ version_added: '2.12'
+ comment_end_string:
+ description:
+ - The string marking the end of a comment statement.
+ type: str
+ version_added: '2.12'
+ trim_blocks:
+ description:
+ - Determine when newlines should be removed from blocks.
+ - When set to C(yes) the first newline after a block is removed (block, not variable tag!).
+ type: bool
+ default: yes
+ version_added: '2.4'
+ lstrip_blocks:
+ description:
+ - Determine when leading spaces and tabs should be stripped.
+ - When set to C(yes) leading spaces and tabs are stripped from the start of a line to a block.
+ type: bool
+ default: no
+ version_added: '2.6'
+ force:
+ description:
+ - Determine whether the file is transferred if the destination already exists.
+ - When set to C(yes), replace the remote file when contents are different than the source.
+ - When set to C(no), the file will only be transferred if the destination does not exist.
+ type: bool
+ default: yes
+ output_encoding:
+ description:
+ - Overrides the encoding used to write the template file defined by C(dest).
+ - It defaults to C(utf-8), but any encoding supported by python can be used.
+ - The source template file must always be encoded using C(utf-8), for homogeneity.
+ type: str
+ default: utf-8
+ version_added: '2.7'
+notes:
+- Including a string that uses a date in the template will result in the template being marked 'changed' each time.
+- Since Ansible 0.9, templates are loaded with C(trim_blocks=True).
+- >
+ Also, you can override Jinja2 settings by adding a special header to the template file.
+ For example, C(#jinja2:variable_start_string:'[%', variable_end_string:'%]', trim_blocks: False)
+ changes the variable interpolation markers to C([% var %]) instead of C({{ var }}).
+ This is the best way to prevent evaluation of things that look like, but should not be, Jinja2.
+- To find Byte Order Marks in files, use C(Format-Hex <file> -Count 16) on Windows, and use C(od -a -t x1 -N 16 <file>)
+ on Linux.
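+# Illustrative only: a task in a module that includes this fragment:
+# - name: Render a configuration file
+#   ansible.builtin.template:
+#     src: app.conf.j2
+#     dest: /etc/app.conf
+#     lstrip_blocks: yes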
+'''
diff --git a/lib/ansible/plugins/doc_fragments/url.py b/lib/ansible/plugins/doc_fragments/url.py
new file mode 100644
index 0000000..eb2b17f
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/url.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, John Barker <gundalow@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard URL documentation fragment
+ DOCUMENTATION = r'''
+options:
+ url:
+ description:
+ - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ type: str
+ force:
+ description:
+ - If C(yes) do not get a cached copy.
+ type: bool
+ default: no
+ http_agent:
+ description:
+ - Header to identify as, generally appears in web server logs.
+ type: str
+ default: ansible-httpget
+ use_proxy:
+ description:
+ - If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
+ type: bool
+ default: yes
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ url_username:
+ description:
+ - The username for use in HTTP basic authentication.
+ - This parameter can be used without I(url_password) for sites that allow empty passwords.
+ type: str
+ url_password:
+ description:
+ - The password for use in HTTP basic authentication.
+ - If the I(url_username) parameter is not specified, the I(url_password) parameter will not be used.
+ type: str
+ force_basic_auth:
+ description:
+ - Credentials specified with I(url_username) and I(url_password) should be passed in HTTP Header.
+ type: bool
+ default: no
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key as well, and if the key is included, C(client_key) is not required.
+ type: path
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If C(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ use_gssapi:
+ description:
+ - Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
+ authentication.
+ - Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
+ - Credentials for GSSAPI can be specified with I(url_username)/I(url_password) or with the GSSAPI env var
+ C(KRB5CCNAME) that specifies a custom Kerberos credential cache.
+ - NTLM authentication is C(not) supported even if the GSSAPI mech for NTLM has been installed.
+ type: bool
+ default: no
+ version_added: '2.11'
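+# Illustrative only: option usage in a module that includes this fragment,
+# for example ansible.builtin.get_url:
+# - name: Download a file with a client certificate, bypassing any proxy
+#   ansible.builtin.get_url:
+#     url: https://example.com/file.tar.gz
+#     dest: /tmp/file.tar.gz
+#     client_cert: /etc/pki/tls/certs/client.pem
+#     use_proxy: no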
+'''
diff --git a/lib/ansible/plugins/doc_fragments/url_windows.py b/lib/ansible/plugins/doc_fragments/url_windows.py
new file mode 100644
index 0000000..286f4b4
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/url_windows.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment:
+
+ # Common options for Ansible.ModuleUtils.WebRequest
+ DOCUMENTATION = r'''
+options:
+ method:
+ description:
+ - The HTTP Method of the request.
+ type: str
+ follow_redirects:
+ description:
+ - Whether or not the module should follow redirects.
+ - C(all) will follow all redirects.
+ - C(none) will not follow any redirect.
+ - C(safe) will follow only "safe" redirects, where "safe" means that the
+ client is only doing a C(GET) or C(HEAD) on the URI to which it is being
+ redirected.
+ - When following a redirected URL, the C(Authorization) header and any
+ credentials set will be dropped and not redirected.
+ choices:
+ - all
+ - none
+ - safe
+ default: safe
+ type: str
+ headers:
+ description:
+ - Extra headers to set on the request.
+ - This should be a dictionary where the key is the header name and the
+ value is the value for that header.
+ type: dict
+ http_agent:
+ description:
+ - Header to identify as, generally appears in web server logs.
+ - This is set to the C(User-Agent) header on a HTTP request.
+ default: ansible-httpget
+ type: str
+ maximum_redirection:
+ description:
+ - Specify how many times the module will redirect a connection to an
+ alternative URI before the connection fails.
+ - If this is set to C(0), if I(follow_redirects) is set to C(none), or if it is set to C(safe) when
+ not doing a C(GET) or C(HEAD), all redirection is prevented.
+ default: 50
+ type: int
+ timeout:
+ description:
+ - Specifies how long the request can be pending before it times out (in
+ seconds).
+ - Set to C(0) to specify an infinite timeout.
+ default: 30
+ type: int
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed
+ certificates.
+ default: yes
+ type: bool
+ client_cert:
+ description:
+ - The path to the client certificate (.pfx) that is used for X509
+ authentication. This path can either be the path to the C(pfx) on the
+ filesystem or the PowerShell certificate path
+ C(Cert:\CurrentUser\My\<thumbprint>).
+ - The WinRM connection must be authenticated with C(CredSSP), or C(become) must be
+ used on the task, if the certificate file is not password protected.
+ - Other authentication types can set I(client_cert_password) when the cert
+ is password protected.
+ type: str
+ client_cert_password:
+ description:
+ - The password for I(client_cert) if the cert is password protected.
+ type: str
+ force_basic_auth:
+ description:
+ - By default the authentication header is only sent when a webservice
+ responds to an initial request with a 401 status. Since some basic auth
+ services do not properly send a 401, logins will fail.
+ - This option forces the sending of the Basic authentication header upon
+ the original request.
+ default: no
+ type: bool
+ url_username:
+ description:
+ - The username to use for authentication.
+ type: str
+ url_password:
+ description:
+ - The password for I(url_username).
+ type: str
+ use_default_credential:
+ description:
+ - Uses the current user's credentials when authenticating with a server
+ protected with C(NTLM), C(Kerberos), or C(Negotiate) authentication.
+ - Sites that use C(Basic) auth will still require explicit credentials
+ through the I(url_username) and I(url_password) options.
+ - The module will only have access to the user's credentials if using
+ C(become) with a password, you are connecting with SSH using a password,
+ or connecting with WinRM using C(CredSSP) or C(Kerberos with delegation).
+ - If not using C(become) or a different auth method to the ones stated
+ above, there will be no default credentials available and no
+ authentication will occur.
+ default: no
+ type: bool
+ use_proxy:
+ description:
+ - If C(no), it will not use the proxy defined in IE for the current user.
+ default: yes
+ type: bool
+ proxy_url:
+ description:
+ - An explicit proxy to use for the request.
+ - By default, the request will use the IE defined proxy unless I(use_proxy)
+ is set to C(no).
+ type: str
+ proxy_username:
+ description:
+ - The username to use for proxy authentication.
+ type: str
+ proxy_password:
+ description:
+ - The password for I(proxy_username).
+ type: str
+ proxy_use_default_credential:
+ description:
+ - Uses the current user's credentials when authenticating with a proxy host
+ protected with C(NTLM), C(Kerberos), or C(Negotiate) authentication.
+ - Proxies that use C(Basic) auth will still require explicit credentials
+ through the I(proxy_username) and I(proxy_password) options.
+ - The module will only have access to the user's credentials if using
+ C(become) with a password, you are connecting with SSH using a password,
+ or connecting with WinRM using C(CredSSP) or C(Kerberos with delegation).
+ - If not using C(become) or a different auth method to the ones stated
+ above, there will be no default credentials available and no proxy
+ authentication will occur.
+ default: no
+ type: bool
+seealso:
+- module: community.windows.win_inet_proxy
+'''
diff --git a/lib/ansible/plugins/doc_fragments/validate.py b/lib/ansible/plugins/doc_fragments/validate.py
new file mode 100644
index 0000000..ac66d25
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/validate.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Ansible, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ validate:
+ description:
+ - The validation command to run before copying the updated file into the final destination.
+ - A temporary file path is used to validate, passed in through '%s' which must be present as in the examples below.
+ - Also, the command is passed securely so shell features such as expansion and pipes will not work.
+ - For an example on how to handle more complex validation than what this
+ option provides, see R(handling complex validation,complex_configuration_validation).
+ type: str
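+ # Illustrative usage, as it would appear on a module that includes this fragment:
+ # validate: /usr/sbin/visudo -cf %s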
+'''
diff --git a/lib/ansible/plugins/doc_fragments/vars_plugin_staging.py b/lib/ansible/plugins/doc_fragments/vars_plugin_staging.py
new file mode 100644
index 0000000..b2da29c
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/vars_plugin_staging.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ stage:
+ description:
+ - Control when this vars plugin may be executed.
+ - Setting this option to C(all) will run the vars plugin after importing inventory and whenever it is demanded by a task.
+ - Setting this option to C(task) will only run the vars plugin whenever it is demanded by a task.
+ - Setting this option to C(inventory) will only run the vars plugin after parsing inventory.
+ - If this option is omitted, the global I(RUN_VARS_PLUGINS) configuration is used to determine when to execute the vars plugin.
+ choices: ['all', 'task', 'inventory']
+ version_added: "2.10"
+ type: str
+'''
diff --git a/lib/ansible/plugins/filter/__init__.py b/lib/ansible/plugins/filter/__init__.py
new file mode 100644
index 0000000..5ae10da
--- /dev/null
+++ b/lib/ansible/plugins/filter/__init__.py
@@ -0,0 +1,14 @@
+# (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.plugins import AnsibleJinja2Plugin
+
+
+class AnsibleJinja2Filter(AnsibleJinja2Plugin):
+
+ def _no_options(self, *args, **kwargs):
+ raise NotImplementedError("Jinja2 filter plugins do not support option functions, they use direct arguments instead.")
diff --git a/lib/ansible/plugins/filter/b64decode.yml b/lib/ansible/plugins/filter/b64decode.yml
new file mode 100644
index 0000000..30565fa
--- /dev/null
+++ b/lib/ansible/plugins/filter/b64decode.yml
@@ -0,0 +1,29 @@
+DOCUMENTATION:
+ name: b64decode
+ author: ansible core team
+ version_added: 'historical'
+ short_description: Decode a base64 string
+ description:
+ - Base64 decoding function.
+ - The return value is a string.
+ - Trying to store a binary blob in a string most likely corrupts the binary. To base64 decode a binary blob,
+ use the ``base64`` command and pipe the encoded data through standard input.
+ For example, in the ``ansible.builtin.shell`` module, ``cmd="base64 --decode > myfile.bin" stdin="{{ encoded }}"``.
+ positional: _input
+ options:
+ _input:
+ description: A base64 string to decode.
+ type: string
+ required: true
+
+EXAMPLES: |
+ # b64 decode a string
+ lola: "{{ 'bG9sYQ==' | b64decode }}"
+
+ # b64 decode the content of 'b64stuff' variable
+ stuff: "{{ b64stuff | b64decode }}"
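+
+ # Illustrative, per the note above: decode a binary blob with the 'base64' command
+ # - name: Write decoded binary data to a file
+ #   ansible.builtin.shell:
+ #     cmd: "base64 --decode > myfile.bin"
+ #     stdin: "{{ encoded }}"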
+
+RETURN:
+ _value:
+ description: The decoded contents of the base64 string.
+ type: string
diff --git a/lib/ansible/plugins/filter/b64encode.yml b/lib/ansible/plugins/filter/b64encode.yml
new file mode 100644
index 0000000..14676e5
--- /dev/null
+++ b/lib/ansible/plugins/filter/b64encode.yml
@@ -0,0 +1,25 @@
+DOCUMENTATION:
+ name: b64encode
+ author: ansible core team
+ version_added: 'historical'
+ short_description: Encode a string as base64
+ description:
+ - Base64 encoding function.
+ positional: _input
+ options:
+ _input:
+ description: A string to encode.
+ type: string
+ required: true
+
+EXAMPLES: |
+ # b64 encode a string
+ b64lola: "{{ 'lola'|b64encode }}"
+
+ # b64 encode the content of 'stuff' variable
+ b64stuff: "{{ stuff|b64encode }}"
+
+RETURN:
+ _value:
+ description: A base64 encoded string.
+ type: string
diff --git a/lib/ansible/plugins/filter/basename.yml b/lib/ansible/plugins/filter/basename.yml
new file mode 100644
index 0000000..4e868df
--- /dev/null
+++ b/lib/ansible/plugins/filter/basename.yml
@@ -0,0 +1,24 @@
+DOCUMENTATION:
+ name: basename
+ author: ansible core team
+ version_added: "historical"
+ short_description: get a path's base name
+ description:
+ - Returns the last name component of a path, that is, what is left in the string that is not the 'dirname'.
+ options:
+ _input:
+ description: A path.
+ type: path
+ required: true
+ seealso:
+ - plugin_type: filter
+ plugin: ansible.builtin.dirname
+EXAMPLES: |
+
+ # To get the last name of a file path, like 'foo.txt' out of '/etc/asdf/foo.txt'
+ {{ mypath | basename }}
+
+RETURN:
+ _value:
+ description: The base name from the path provided.
+ type: str
diff --git a/lib/ansible/plugins/filter/bool.yml b/lib/ansible/plugins/filter/bool.yml
new file mode 100644
index 0000000..86ba353
--- /dev/null
+++ b/lib/ansible/plugins/filter/bool.yml
@@ -0,0 +1,28 @@
+DOCUMENTATION:
+ name: bool
+ version_added: "historical"
+ short_description: cast into a boolean
+ description:
+ - Attempt to cast the input into a boolean (C(True) or C(False)) value.
+ positional: _input
+ options:
+ _input:
+ description: Data to cast.
+ type: raw
+ required: true
+
+EXAMPLES: |
+
+ # in a variable definition, cast a comparison or another variable to a boolean
+ vars:
+ isbool: "{{ (a == b) | bool }}"
+ otherbool: "{{ anothervar | bool }}"
+
+ # in a task
+ ...
+ when: some_string_value | bool
+
+RETURN:
+ _value:
+ description: The boolean resulting of casting the input expression into a C(True) or C(False) value.
+ type: bool
diff --git a/lib/ansible/plugins/filter/checksum.yml b/lib/ansible/plugins/filter/checksum.yml
new file mode 100644
index 0000000..2f8eadd
--- /dev/null
+++ b/lib/ansible/plugins/filter/checksum.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: checksum
+ version_added: "1.9"
+ short_description: checksum of input data
+ description:
+ - Returns a checksum (L(SHA-1, https://en.wikipedia.org/wiki/SHA-1)) hash of the input data.
+ positional: _input
+ options:
+ _input:
+ description: Data to checksum.
+ type: raw
+ required: true
+
+EXAMPLES: |
+ # csum => "109f4b3c50d7b0df729d299bc6f8e9ef9066971f"
+ csum: "{{ 'test2' | checksum }}"
+
+RETURN:
+ _value:
+ description: The checksum (SHA-1) of the input.
+ type: string
diff --git a/lib/ansible/plugins/filter/combinations.yml b/lib/ansible/plugins/filter/combinations.yml
new file mode 100644
index 0000000..a46e51e
--- /dev/null
+++ b/lib/ansible/plugins/filter/combinations.yml
@@ -0,0 +1,26 @@
+DOCUMENTATION:
+ name: combinations
+ version_added: "historical"
+ short_description: combinations from the elements of a list
+ description:
+ - Create a list of combinations of sets from the elements of a list.
+ positional: _input, set_size
+ options:
+ _input:
+ description: Elements to combine.
+ type: list
+ required: true
+ set_size:
+ description: The size of the set for each combination.
+ type: int
+ required: true
+EXAMPLES: |
+
+ # combos_of_two => [ [ 1, 2 ], [ 1, 3 ], [ 1, 4 ], [ 1, 5 ], [ 2, 3 ], [ 2, 4 ], [ 2, 5 ], [ 3, 4 ], [ 3, 5 ], [ 4, 5 ] ]
+ combos_of_two: "{{ [1,2,3,4,5] | combinations(2) }}"
+
+
+RETURN:
+ _value:
+ description: List of combination sets resulting from the supplied elements and set size.
+ type: list
diff --git a/lib/ansible/plugins/filter/combine.yml b/lib/ansible/plugins/filter/combine.yml
new file mode 100644
index 0000000..86788f3
--- /dev/null
+++ b/lib/ansible/plugins/filter/combine.yml
@@ -0,0 +1,44 @@
+DOCUMENTATION:
+ name: combine
+ version_added: "2.0"
+ short_description: combine two dictionaries
+ description:
+ - Create a dictionary (hash/associative array) as a result of merging existing dictionaries.
+ positional: _input, _dicts
+ options:
+ _input:
+ description: First dictionary to combine.
+ type: dict
+ required: true
+ _dicts: # TODO: this is really an *args so not list, but list ref
+ description: The list of dictionaries to combine.
+ type: list
+ elements: dictionary
+ required: true
+ recursive:
+ description: If C(True), merge elements recursively.
+ type: bool
+ default: false
+ list_merge:
+ description: Behavior when encountering list elements.
+ type: str
+ default: replace
+ choices:
+ replace: overwrite older entries with newer ones
+ keep: discard newer entries
+ append: append newer entries to the older ones
+ prepend: insert newer entries in front of the older ones
+ append_rp: append newer entries to the older ones, overwrite duplicates
+ prepend_rp: insert newer entries in front of the older ones, discard duplicates
+
+EXAMPLES: |
+
+ # ab => {'a':1, 'b':3, 'c': 4}
+ ab: {{ {'a':1, 'b':2} | combine({'b':3, 'c':4}) }}
+
+ many: "{{ dict1 | combine(dict2, dict3, dict4) }}"
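+
+ # Illustrative, using the keyword options documented above (dict1/dict2 assumed defined)
+ merged: "{{ dict1 | combine(dict2, recursive=true, list_merge='append') }}"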
+
+RETURN:
+ _value:
+ description: Resulting merge of supplied dictionaries.
+ type: dict
diff --git a/lib/ansible/plugins/filter/comment.yml b/lib/ansible/plugins/filter/comment.yml
new file mode 100644
index 0000000..95a4efb
--- /dev/null
+++ b/lib/ansible/plugins/filter/comment.yml
@@ -0,0 +1,60 @@
+DOCUMENTATION:
+ name: comment
+ version_added: 'historical'
+ short_description: comment out a string
+ description:
+ - Use programming language conventions to turn the input string into an embeddable comment.
+ positional: _input, style
+ options:
+ _input:
+ description: String to comment.
+ type: string
+ required: true
+ style:
+ description: Comment style to use.
+ type: string
+ default: plain
+ choices: ['plain', 'decoration', 'erlang', 'c', 'cblock', 'xml']
+ decoration:
+ description: Indicator for comment or intermediate comment depending on the style.
+ type: string
+ beginning:
+ description: Indicator of the start of a comment block, only available for styles that support multiline comments.
+ type: string
+ end:
+ description: Indicator of the end of a comment block, only available for styles that support multiline comments.
+ type: string
+ newline:
+ description: Indicator of comment end of line, only available for styles that support multiline comments.
+ type: string
+ default: '\n'
+ prefix:
+ description: Token to start each line inside a comment block, only available for styles that support multiline comments.
+ type: string
+ prefix_count:
+ description: Number of times to add a prefix at the start of a line, when a prefix exists and is usable.
+ type: int
+ default: 1
+ postfix:
+ description: Indicator of the end of each line inside a comment block, only available for styles that support multiline comments.
+ type: string
+ postfix_count:
+ description: Number of times to add a postfix at the end of a line, when a postfix exists and is usable.
+ type: int
+ default: 1
+
+EXAMPLES: |
+
+ # commented => #
+ # # Plain style (default)
+ # #
+ commented: "{{ 'Plain style (default)' | comment }}"
+
+ # output not shown here ...
+ verycustom: "{{ 'Custom style' | comment('plain', prefix='#######\n#', postfix='#\n#######\n ###\n #') }}"
+
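+ # c style, one of the choices documented above
+ # c_commented => //
+ #                // C style
+ #                //
+ c_commented: "{{ 'C style' | comment('c') }}"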
+
+RETURN:
+ _value:
+ description: The 'commented out' string.
+ type: string
diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py
new file mode 100644
index 0000000..52a2cd1
--- /dev/null
+++ b/lib/ansible/plugins/filter/core.py
@@ -0,0 +1,658 @@
+# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import glob
+import hashlib
+import json
+import ntpath
+import os.path
+import re
+import shlex
+import sys
+import time
+import uuid
+import yaml
+import datetime
+
+from collections.abc import Mapping
+from functools import partial
+from random import Random, SystemRandom, shuffle
+
+from jinja2.filters import pass_environment
+
+from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleFilterTypeError
+from ansible.module_utils.six import string_types, integer_types, reraise, text_type
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.common.yaml import yaml_load, yaml_load_all
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.template import recursive_check_defined
+from ansible.utils.display import Display
+from ansible.utils.encrypt import passlib_or_crypt
+from ansible.utils.hashing import md5s, checksum_s
+from ansible.utils.unicode import unicode_wrap
+from ansible.utils.vars import merge_hash
+
+display = Display()
+
+UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
+
+
+def to_yaml(a, *args, **kw):
+ '''Make verbose, human readable yaml'''
+ default_flow_style = kw.pop('default_flow_style', None)
+ try:
+ transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, default_flow_style=default_flow_style, **kw)
+ except Exception as e:
+ raise AnsibleFilterError("to_yaml - %s" % to_native(e), orig_exc=e)
+ return to_text(transformed)
+
+
+def to_nice_yaml(a, indent=4, *args, **kw):
+ '''Make verbose, human readable yaml'''
+ try:
+ transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
+ except Exception as e:
+ raise AnsibleFilterError("to_nice_yaml - %s" % to_native(e), orig_exc=e)
+ return to_text(transformed)
+
+
+def to_json(a, *args, **kw):
+ ''' Convert the value to JSON '''
+
+ # defaults for filters
+ if 'vault_to_text' not in kw:
+ kw['vault_to_text'] = True
+ if 'preprocess_unsafe' not in kw:
+ kw['preprocess_unsafe'] = False
+
+ return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
+
+
+def to_nice_json(a, indent=4, sort_keys=True, *args, **kw):
+ '''Make verbose, human readable JSON'''
+ return to_json(a, indent=indent, sort_keys=sort_keys, separators=(',', ': '), *args, **kw)
+
+
+def to_bool(a):
+ ''' return a bool for the arg '''
+ if a is None or isinstance(a, bool):
+ return a
+ if isinstance(a, string_types):
+ a = a.lower()
+ if a in ('yes', 'on', '1', 'true', 1):
+ return True
+ return False
+
+
+def to_datetime(string, format="%Y-%m-%d %H:%M:%S"):
+ return datetime.datetime.strptime(string, format)
+
+
+def strftime(string_format, second=None, utc=False):
+ ''' return a date string for the given format string. See https://docs.python.org/3/library/time.html#time.strftime for format codes '''
+ if utc:
+ timefn = time.gmtime
+ else:
+ timefn = time.localtime
+ if second is not None:
+ try:
+ second = float(second)
+ except Exception:
+ raise AnsibleFilterError('Invalid value for epoch value (%s)' % second)
+ return time.strftime(string_format, timefn(second))
+
+
+def quote(a):
+ ''' return its argument quoted for shell usage '''
+ if a is None:
+ a = u''
+ return shlex.quote(to_text(a))
+
+
+def fileglob(pathname):
+ ''' return list of matched regular files for glob '''
+ return [g for g in glob.glob(pathname) if os.path.isfile(g)]
+
+
+def regex_replace(value='', pattern='', replacement='', ignorecase=False, multiline=False):
+ ''' Perform a `re.sub` returning a string '''
+
+ value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
+
+ flags = 0
+ if ignorecase:
+ flags |= re.I
+ if multiline:
+ flags |= re.M
+ _re = re.compile(pattern, flags=flags)
+ return _re.sub(replacement, value)
+
+
+def regex_findall(value, regex, multiline=False, ignorecase=False):
+ ''' Perform re.findall and return the list of matches '''
+
+ value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
+
+ flags = 0
+ if ignorecase:
+ flags |= re.I
+ if multiline:
+ flags |= re.M
+ return re.findall(regex, value, flags)
+
+
+def regex_search(value, regex, *args, **kwargs):
+ ''' Perform re.search and return the list of matches or a backref '''
+
+ value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
+
+ groups = list()
+ for arg in args:
+ if arg.startswith('\\g'):
+ match = re.match(r'\\g<(\S+)>', arg).group(1)
+ groups.append(match)
+ elif arg.startswith('\\'):
+ match = int(re.match(r'\\(\d+)', arg).group(1))
+ groups.append(match)
+ else:
+ raise AnsibleFilterError('Unknown argument')
+
+ flags = 0
+ if kwargs.get('ignorecase'):
+ flags |= re.I
+ if kwargs.get('multiline'):
+ flags |= re.M
+
+ match = re.search(regex, value, flags)
+ if match:
+ if not groups:
+ return match.group()
+ else:
+ items = list()
+ for item in groups:
+ items.append(match.group(item))
+ return items
+
+
+def ternary(value, true_val, false_val, none_val=None):
+ ''' value ? true_val : false_val '''
+ if value is None and none_val is not None:
+ return none_val
+ elif bool(value):
+ return true_val
+ else:
+ return false_val
+
+
+def regex_escape(string, re_type='python'):
+ '''Escape all regular expressions special characters from STRING.'''
+ string = to_text(string, errors='surrogate_or_strict', nonstring='simplerepr')
+ if re_type == 'python':
+ return re.escape(string)
+ elif re_type == 'posix_basic':
+ # list of BRE special chars:
+ # https://en.wikibooks.org/wiki/Regular_Expressions/POSIX_Basic_Regular_Expressions
+ return regex_replace(string, r'([].[^$*\\])', r'\\\1')
+ # TODO: implement posix_extended
+ # It's similar to, but different from python regex, which is similar to,
+ # but different from PCRE. It's possible that re.escape would work here.
+ # https://remram44.github.io/regex-cheatsheet/regex.html#programs
+ elif re_type == 'posix_extended':
+ raise AnsibleFilterError('Regex type (%s) not yet implemented' % re_type)
+ else:
+ raise AnsibleFilterError('Invalid regex type (%s)' % re_type)
+
+
+def from_yaml(data):
+ if isinstance(data, string_types):
+ # The ``text_type`` call here strips any custom
+ # string wrapper class, so that CSafeLoader can
+ # read the data
+ return yaml_load(text_type(to_text(data, errors='surrogate_or_strict')))
+ return data
+
+
+def from_yaml_all(data):
+ if isinstance(data, string_types):
+ # The ``text_type`` call here strips any custom
+ # string wrapper class, so that CSafeLoader can
+ # read the data
+ return yaml_load_all(text_type(to_text(data, errors='surrogate_or_strict')))
+ return data
+
+
+@pass_environment
+def rand(environment, end, start=None, step=None, seed=None):
+ if seed is None:
+ r = SystemRandom()
+ else:
+ r = Random(seed)
+ if isinstance(end, integer_types):
+ if not start:
+ start = 0
+ if not step:
+ step = 1
+ return r.randrange(start, end, step)
+ elif hasattr(end, '__iter__'):
+ if start or step:
+ raise AnsibleFilterError('start and step can only be used with integer values')
+ return r.choice(end)
+ else:
+ raise AnsibleFilterError('random can only be used on sequences and integers')
+
+
+def randomize_list(mylist, seed=None):
+ try:
+ mylist = list(mylist)
+ if seed:
+ r = Random(seed)
+ r.shuffle(mylist)
+ else:
+ shuffle(mylist)
+ except Exception:
+ pass
+ return mylist
+
+
+def get_hash(data, hashtype='sha1'):
+ try:
+ h = hashlib.new(hashtype)
+ except Exception as e:
+ # hash is not supported?
+ raise AnsibleFilterError(e)
+
+ h.update(to_bytes(data, errors='surrogate_or_strict'))
+ return h.hexdigest()
+
+
+def get_encrypted_password(password, hashtype='sha512', salt=None, salt_size=None, rounds=None, ident=None):
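+ # Exposed as the 'password_hash' filter; accepts passlib scheme names as well as
+ # the short aliases mapped below, e.g. "{{ 'secret' | password_hash('sha512') }}".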
+ passlib_mapping = {
+ 'md5': 'md5_crypt',
+ 'blowfish': 'bcrypt',
+ 'sha256': 'sha256_crypt',
+ 'sha512': 'sha512_crypt',
+ }
+
+ hashtype = passlib_mapping.get(hashtype, hashtype)
+ try:
+ return passlib_or_crypt(password, hashtype, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
+ except AnsibleError as e:
+ reraise(AnsibleFilterError, AnsibleFilterError(to_native(e), orig_exc=e), sys.exc_info()[2])
+
+
+def to_uuid(string, namespace=UUID_NAMESPACE_ANSIBLE):
+ uuid_namespace = namespace
+ if not isinstance(uuid_namespace, uuid.UUID):
+ try:
+ uuid_namespace = uuid.UUID(namespace)
+ except (AttributeError, ValueError) as e:
+ raise AnsibleFilterError("Invalid value '%s' for 'namespace': %s" % (to_native(namespace), to_native(e)))
+ # uuid.uuid5() requires bytes on Python 2 and bytes or text on Python 3
+ return to_text(uuid.uuid5(uuid_namespace, to_native(string, errors='surrogate_or_strict')))
+
+
+def mandatory(a, msg=None):
+ ''' Make a variable mandatory '''
+ from jinja2.runtime import Undefined
+
+ if isinstance(a, Undefined):
+ if a._undefined_name is not None:
+ name = "'%s' " % to_text(a._undefined_name)
+ else:
+ name = ''
+
+ if msg is not None:
+ raise AnsibleFilterError(to_native(msg))
+ else:
+ raise AnsibleFilterError("Mandatory variable %s not defined." % name)
+
+ return a
+
+
+def combine(*terms, **kwargs):
+ recursive = kwargs.pop('recursive', False)
+ list_merge = kwargs.pop('list_merge', 'replace')
+ if kwargs:
+ raise AnsibleFilterError("'recursive' and 'list_merge' are the only valid keyword arguments")
+
+ # allow the user to do `[dict1, dict2, ...] | combine`
+ dictionaries = flatten(terms, levels=1)
+
+ # recursively check that every element is defined (for jinja2)
+ recursive_check_defined(dictionaries)
+
+ if not dictionaries:
+ return {}
+
+ if len(dictionaries) == 1:
+ return dictionaries[0]
+
+ # merge all the dicts so that the dict at the end of the array have precedence
+ # over the dict at the beginning.
+ # we merge the dicts from the highest to the lowest priority because there is
+ # a huge probability that the lowest priority dict will be the biggest in size
+ # (as the low prio dict will hold the "default" values and the others will be "patches")
+ # and merge_hash creates a copy of its first argument.
+ # so high/right -> low/left is more efficient than low/left -> high/right
+ high_to_low_prio_dict_iterator = reversed(dictionaries)
+ result = next(high_to_low_prio_dict_iterator)
+ for dictionary in high_to_low_prio_dict_iterator:
+ result = merge_hash(dictionary, result, recursive, list_merge)
+
+ return result
+
+
+def comment(text, style='plain', **kw):
+ # Predefined comment types
+ comment_styles = {
+ 'plain': {
+ 'decoration': '# '
+ },
+ 'erlang': {
+ 'decoration': '% '
+ },
+ 'c': {
+ 'decoration': '// '
+ },
+ 'cblock': {
+ 'beginning': '/*',
+ 'decoration': ' * ',
+ 'end': ' */'
+ },
+ 'xml': {
+ 'beginning': '<!--',
+ 'decoration': ' - ',
+ 'end': '-->'
+ }
+ }
+
+ # Pointer to the right comment type
+ style_params = comment_styles[style]
+
+ if 'decoration' in kw:
+ prepostfix = kw['decoration']
+ else:
+ prepostfix = style_params['decoration']
+
+ # Default params
+ p = {
+ 'newline': '\n',
+ 'beginning': '',
+ 'prefix': (prepostfix).rstrip(),
+ 'prefix_count': 1,
+ 'decoration': '',
+ 'postfix': (prepostfix).rstrip(),
+ 'postfix_count': 1,
+ 'end': ''
+ }
+
+ # Update default params
+ p.update(style_params)
+ p.update(kw)
+
+ # Compose substrings for the final string
+ str_beginning = ''
+ if p['beginning']:
+ str_beginning = "%s%s" % (p['beginning'], p['newline'])
+ str_prefix = ''
+ if p['prefix']:
+ if p['prefix'] != p['newline']:
+ str_prefix = str(
+ "%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
+ else:
+ str_prefix = str(
+ "%s" % (p['newline'])) * int(p['prefix_count'])
+ str_text = ("%s%s" % (
+ p['decoration'],
+ # Prepend each line of the text with the decorator
+ text.replace(
+ p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
+ # Remove trailing spaces when only decorator is on the line
+ "%s%s" % (p['decoration'], p['newline']),
+ "%s%s" % (p['decoration'].rstrip(), p['newline']))
+ str_postfix = p['newline'].join(
+ [''] + [p['postfix'] for x in range(p['postfix_count'])])
+ str_end = ''
+ if p['end']:
+ str_end = "%s%s" % (p['newline'], p['end'])
+
+ # Return the final string
+ return "%s%s%s%s%s" % (
+ str_beginning,
+ str_prefix,
+ str_text,
+ str_postfix,
+ str_end)
+
+
+@pass_environment
+def extract(environment, item, container, morekeys=None):
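+ # Exposed as the 'extract' filter: walks 'container' using 'item' as the first key
+ # and any 'morekeys' as subsequent keys, so 'a' | extract(d, 'b') is roughly d['a']['b'].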
+ if morekeys is None:
+ keys = [item]
+ elif isinstance(morekeys, list):
+ keys = [item] + morekeys
+ else:
+ keys = [item, morekeys]
+
+ value = container
+ for key in keys:
+ value = environment.getitem(value, key)
+
+ return value
+
+
+def b64encode(string, encoding='utf-8'):
+ return to_text(base64.b64encode(to_bytes(string, encoding=encoding, errors='surrogate_or_strict')))
+
+
+def b64decode(string, encoding='utf-8'):
+ return to_text(base64.b64decode(to_bytes(string, errors='surrogate_or_strict')), encoding=encoding)
+
+
+def flatten(mylist, levels=None, skip_nulls=True):
+
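+ # Flatten nested sequences into a single list; 'levels' limits how deep to recurse,
+ # and 'skip_nulls' drops None/'None'/'null' entries along the way.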
+ ret = []
+ for element in mylist:
+ if skip_nulls and element in (None, 'None', 'null'):
+ # ignore null items
+ continue
+ elif is_sequence(element):
+ if levels is None:
+ ret.extend(flatten(element, skip_nulls=skip_nulls))
+ elif levels >= 1:
+ # decrement as we go down the stack
+ ret.extend(flatten(element, levels=(int(levels) - 1), skip_nulls=skip_nulls))
+ else:
+ ret.append(element)
+ else:
+ ret.append(element)
+
+ return ret
+
+
+def subelements(obj, subelements, skip_missing=False):
+ '''Accepts a dict or list of dicts, and a dotted accessor and produces a product
+ of the element and the results of the dotted accessor
+
+ >>> obj = [{"name": "alice", "groups": ["wheel"], "authorized": ["/tmp/alice/onekey.pub"]}]
+ >>> subelements(obj, 'groups')
+ [({'name': 'alice', 'groups': ['wheel'], 'authorized': ['/tmp/alice/onekey.pub']}, 'wheel')]
+
+ '''
+ if isinstance(obj, dict):
+ element_list = list(obj.values())
+ elif isinstance(obj, list):
+ element_list = obj[:]
+ else:
+ raise AnsibleFilterError('obj must be a list of dicts or a nested dict')
+
+ if isinstance(subelements, list):
+ subelement_list = subelements[:]
+ elif isinstance(subelements, string_types):
+ subelement_list = subelements.split('.')
+ else:
+ raise AnsibleFilterTypeError('subelements must be a list or a string')
+
+ results = []
+
+ for element in element_list:
+ values = element
+ for subelement in subelement_list:
+ try:
+ values = values[subelement]
+ except KeyError:
+ if skip_missing:
+ values = []
+ break
+ raise AnsibleFilterError("could not find %r key in iterated item %r" % (subelement, values))
+ except TypeError:
+ raise AnsibleFilterTypeError("the key %s should point to a dictionary, got '%s'" % (subelement, values))
+ if not isinstance(values, list):
+ raise AnsibleFilterTypeError("the key %r should point to a list, got %r" % (subelement, values))
+
+ for value in values:
+ results.append((element, value))
+
+ return results
+
+
+def dict_to_list_of_dict_key_value_elements(mydict, key_name='key', value_name='value'):
+ ''' takes a dictionary and transforms it into a list of dictionaries,
+ with each having 'key' and 'value' keys that correspond to the keys and values of the original '''
+
+ if not isinstance(mydict, Mapping):
+ raise AnsibleFilterTypeError("dict2items requires a dictionary, got %s instead." % type(mydict))
+
+ ret = []
+ for key in mydict:
+ ret.append({key_name: key, value_name: mydict[key]})
+ return ret
+
+
+def list_of_dict_key_value_elements_to_dict(mylist, key_name='key', value_name='value'):
+ ''' takes a list of dicts with each having 'key' and 'value' keys, and transforms the list into a dictionary,
+ effectively as the reverse of dict2items '''
+
+ if not is_sequence(mylist):
+ raise AnsibleFilterTypeError("items2dict requires a list, got %s instead." % type(mylist))
+
+ try:
+ return dict((item[key_name], item[value_name]) for item in mylist)
+ except KeyError:
+ raise AnsibleFilterTypeError(
+ "items2dict requires each dictionary in the list to contain the keys '%s' and '%s', got %s instead."
+ % (key_name, value_name, mylist)
+ )
+ except TypeError:
+ raise AnsibleFilterTypeError("items2dict requires a list of dictionaries, got %s instead." % mylist)
+
+
+def path_join(paths):
+ ''' takes a sequence or a string, and returns a concatenation
+ of the different members '''
+ if isinstance(paths, string_types):
+ return os.path.join(paths)
+ elif is_sequence(paths):
+ return os.path.join(*paths)
+ else:
+ raise AnsibleFilterTypeError("|path_join expects string or sequence, got %s instead." % type(paths))
+
+
+class FilterModule(object):
+ ''' Ansible core jinja2 filters '''
+
+ def filters(self):
+ return {
+ # base 64
+ 'b64decode': b64decode,
+ 'b64encode': b64encode,
+
+ # uuid
+ 'to_uuid': to_uuid,
+
+ # json
+ 'to_json': to_json,
+ 'to_nice_json': to_nice_json,
+ 'from_json': json.loads,
+
+ # yaml
+ 'to_yaml': to_yaml,
+ 'to_nice_yaml': to_nice_yaml,
+ 'from_yaml': from_yaml,
+ 'from_yaml_all': from_yaml_all,
+
+ # path
+ 'basename': partial(unicode_wrap, os.path.basename),
+ 'dirname': partial(unicode_wrap, os.path.dirname),
+ 'expanduser': partial(unicode_wrap, os.path.expanduser),
+ 'expandvars': partial(unicode_wrap, os.path.expandvars),
+ 'path_join': path_join,
+ 'realpath': partial(unicode_wrap, os.path.realpath),
+ 'relpath': partial(unicode_wrap, os.path.relpath),
+ 'splitext': partial(unicode_wrap, os.path.splitext),
+ 'win_basename': partial(unicode_wrap, ntpath.basename),
+ 'win_dirname': partial(unicode_wrap, ntpath.dirname),
+ 'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
+
+ # file glob
+ 'fileglob': fileglob,
+
+ # types
+ 'bool': to_bool,
+ 'to_datetime': to_datetime,
+
+ # date formatting
+ 'strftime': strftime,
+
+ # quote string for shell usage
+ 'quote': quote,
+
+ # hash filters
+ # md5 hex digest of string
+ 'md5': md5s,
+ # sha1 hex digest of string
+ 'sha1': checksum_s,
+ # checksum of string as used by ansible for checksumming files
+ 'checksum': checksum_s,
+ # generic hashing
+ 'password_hash': get_encrypted_password,
+ 'hash': get_hash,
+
+ # regex
+ 'regex_replace': regex_replace,
+ 'regex_escape': regex_escape,
+ 'regex_search': regex_search,
+ 'regex_findall': regex_findall,
+
+ # ? : ;
+ 'ternary': ternary,
+
+ # random stuff
+ 'random': rand,
+ 'shuffle': randomize_list,
+
+ # undefined
+ 'mandatory': mandatory,
+
+ # comment-style decoration
+ 'comment': comment,
+
+ # debug
+ 'type_debug': lambda o: o.__class__.__name__,
+
+ # Data structures
+ 'combine': combine,
+ 'extract': extract,
+ 'flatten': flatten,
+ 'dict2items': dict_to_list_of_dict_key_value_elements,
+ 'items2dict': list_of_dict_key_value_elements_to_dict,
+ 'subelements': subelements,
+ 'split': partial(unicode_wrap, text_type.split),
+ }
diff --git a/lib/ansible/plugins/filter/dict2items.yml b/lib/ansible/plugins/filter/dict2items.yml
new file mode 100644
index 0000000..aa51826
--- /dev/null
+++ b/lib/ansible/plugins/filter/dict2items.yml
@@ -0,0 +1,45 @@
+DOCUMENTATION:
+ name: dict2items
+ author: Ansible core team
+ version_added: "2.6"
+ short_description: Convert a dictionary into an itemized list of dictionaries
+ positional: _input, key_name, value_name
+ description:
+ - Takes a dictionary and transforms it into a list of dictionaries, with each having
+ C(key) and C(value) keys that correspond to the keys and values of the original.
+ options:
+ _input:
+ description:
+ - The dictionary to transform.
+ type: dict
+ required: true
+ key_name:
+ description: The name of the property on the item representing the dictionary's keys.
+ type: str
+ default: key
+ version_added: "2.8"
+ value_name:
+ description: The name of the property on the item representing the dictionary's values.
+ type: str
+ default: value
+ version_added: "2.8"
+ seealso:
+ - plugin_type: filter
+ plugin: ansible.builtin.items2dict
+
+EXAMPLES: |
+
+ # items => [ { "key": "a", "value": 1 }, { "key": "b", "value": 2 } ]
+ items: "{{ {'a': 1, 'b': 2} | dict2items }}"
+
+ vars:
+ files:
+ users: /etc/passwd
+ groups: /etc/group
+ files_dicts: "{{ files | dict2items(key_name='file', value_name='path') }}"
+
+RETURN:
+ _value:
+ description: A list of dictionaries.
+ type: list
+ elements: dict
diff --git a/lib/ansible/plugins/filter/difference.yml b/lib/ansible/plugins/filter/difference.yml
new file mode 100644
index 0000000..decc811
--- /dev/null
+++ b/lib/ansible/plugins/filter/difference.yml
@@ -0,0 +1,35 @@
+DOCUMENTATION:
+ name: difference
+ author: Brian Coca (@bcoca)
+ version_added: "1.4"
+ short_description: the difference of one list from another
+ description:
+ - Provide a unique list of all the elements of the first list that do not appear in the second one.
+ options:
+ _input:
+ description: A list.
+ type: list
+ required: true
+ _second_list:
+ description: A list.
+ type: list
+ required: true
+ seealso:
+ - plugin_type: filter
+ plugin: ansible.builtin.intersect
+ - plugin_type: filter
+ plugin: ansible.builtin.symmetric_difference
+ - plugin_type: filter
+ plugin: ansible.builtin.union
+ - plugin_type: filter
+ plugin: ansible.builtin.unique
+EXAMPLES: |
+ # return the elements of list1 not in list2
+ # list1: [1, 2, 5, 1, 3, 4, 10]
+ # list2: [1, 2, 3, 4, 5, 11, 99]
+ {{ list1 | difference(list2) }}
+ # => [10]
+RETURN:
+ _value:
+ description: A unique list of the elements from the first list that do not appear in the second.
+ type: list
diff --git a/lib/ansible/plugins/filter/dirname.yml b/lib/ansible/plugins/filter/dirname.yml
new file mode 100644
index 0000000..52f7d5d
--- /dev/null
+++ b/lib/ansible/plugins/filter/dirname.yml
@@ -0,0 +1,24 @@
+DOCUMENTATION:
+ name: dirname
+ author: ansible core team
+ version_added: "historical"
+ short_description: get a path's directory name
+ description:
+ - Returns the 'head' component of a path, basically everything that is not the 'basename'.
+ options:
+ _input:
+ description: A path.
+ type: path
+ required: true
+ seealso:
+ - plugin: ansible.builtin.basename
+ plugin_type: filter
+EXAMPLES: |
+
+ # To get the dir name of a file path, like '/etc/asdf' out of '/etc/asdf/foo.txt'
+ {{ mypath | dirname }}
+
+RETURN:
+ _value:
+ description: The directory portion of the original path.
+ type: path
diff --git a/lib/ansible/plugins/filter/encryption.py b/lib/ansible/plugins/filter/encryption.py
new file mode 100644
index 0000000..b6f4961
--- /dev/null
+++ b/lib/ansible/plugins/filter/encryption.py
@@ -0,0 +1,82 @@
+# Copyright: (c) 2021, Ansible Project
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from jinja2.runtime import Undefined
+from jinja2.exceptions import UndefinedError
+
+from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
+from ansible.module_utils._text import to_native, to_bytes
+from ansible.module_utils.six import string_types, binary_type
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible.parsing.vault import is_encrypted, VaultSecret, VaultLib
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def do_vault(data, secret, salt=None, vaultid='filter_default', wrap_object=False):
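+ # Encrypt 'data' with 'secret'; returns the vault payload as text, or as an
+ # AnsibleVaultEncryptedUnicode object when wrap_object=True.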
+
+ if not isinstance(secret, (string_types, binary_type, Undefined)):
+ raise AnsibleFilterTypeError("Secret passed is required to be a string, instead we got: %s" % type(secret))
+
+ if not isinstance(data, (string_types, binary_type, Undefined)):
+ raise AnsibleFilterTypeError("Can only vault strings, instead we got: %s" % type(data))
+
+ vault = ''
+ vs = VaultSecret(to_bytes(secret))
+ vl = VaultLib()
+ try:
+ vault = vl.encrypt(to_bytes(data), vs, vaultid, salt)
+ except UndefinedError:
+ raise
+ except Exception as e:
+ raise AnsibleFilterError("Unable to encrypt: %s" % to_native(e), orig_exc=e)
+
+ if wrap_object:
+ vault = AnsibleVaultEncryptedUnicode(vault)
+ else:
+ vault = to_native(vault)
+
+ return vault
+
+
+def do_unvault(vault, secret, vaultid='filter_default'):
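+ # Decrypt 'vault' with 'secret'; input that is not vault-encrypted is returned unchanged.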
+
+ if not isinstance(secret, (string_types, binary_type, Undefined)):
+ raise AnsibleFilterTypeError("Secret passed is required to be a string, instead we got: %s" % type(secret))
+
+ if not isinstance(vault, (string_types, binary_type, AnsibleVaultEncryptedUnicode, Undefined)):
+ raise AnsibleFilterTypeError("Vault should be in the form of a string, instead we got: %s" % type(vault))
+
+ data = ''
+ vs = VaultSecret(to_bytes(secret))
+ vl = VaultLib([(vaultid, vs)])
+ if isinstance(vault, AnsibleVaultEncryptedUnicode):
+ vault.vault = vl
+ data = vault.data
+ elif is_encrypted(vault):
+ try:
+ data = vl.decrypt(vault)
+ except UndefinedError:
+ raise
+ except Exception as e:
+ raise AnsibleFilterError("Unable to decrypt: %s" % to_native(e), orig_exc=e)
+ else:
+ data = vault
+
+ return to_native(data)
+
+
+class FilterModule(object):
+ ''' Ansible vault jinja2 filters '''
+
+ def filters(self):
+ filters = {
+ 'vault': do_vault,
+ 'unvault': do_unvault,
+ }
+
+ return filters
diff --git a/lib/ansible/plugins/filter/expanduser.yml b/lib/ansible/plugins/filter/expanduser.yml
new file mode 100644
index 0000000..2aff468
--- /dev/null
+++ b/lib/ansible/plugins/filter/expanduser.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: expanduser
+ author: ansible core team
+ version_added: "1.5"
+ short_description: Returns a path with C(~) translation.
+ description:
+ - Translates C(~) in a path to the proper user's home directory.
+ options:
+ _input:
+ description: A string that contains a path.
+ type: path
+ required: true
+EXAMPLES: |
+
+ # To get '/home/myuser/stuff.txt' from '~/stuff.txt'.
+ {{ mypath | expanduser }}
+
+RETURN:
+ _value:
+ description: The translated path.
+ type: path
diff --git a/lib/ansible/plugins/filter/expandvars.yml b/lib/ansible/plugins/filter/expandvars.yml
new file mode 100644
index 0000000..02c201e
--- /dev/null
+++ b/lib/ansible/plugins/filter/expandvars.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: expandvars
+ author: ansible core team
+ version_added: "1.5"
+ short_description: expand environment variables
+ description:
+ - Will do a shell-like substitution of environment variables on the provided input.
+ options:
+ _input:
+ description: A string that contains environment variables.
+ type: str
+ required: true
+EXAMPLES: |
+
+ # To get '/home/myuser/stuff.txt' from '$HOME/stuff.txt'
+ {{ mypath | expandvars }}
+
+RETURN:
+ _value:
+ description: The string with translated environment variable values.
+ type: str
diff --git a/lib/ansible/plugins/filter/extract.yml b/lib/ansible/plugins/filter/extract.yml
new file mode 100644
index 0000000..2b4989d
--- /dev/null
+++ b/lib/ansible/plugins/filter/extract.yml
@@ -0,0 +1,39 @@
+DOCUMENTATION:
+ name: extract
+ version_added: "2.1"
+ short_description: extract a value based on an index or key
+ description:
+ - Extract a value from a list or dictionary based on an index/key.
+ - User must ensure that index or key used matches the type of container.
+ - Equivalent of using C(list[index]) and C(dictionary[key]) but useful as a filter to combine with C(map).
+ positional: _input, container, morekeys
+ options:
+ _input:
+ description: Index or key to extract.
+ type: raw
+ required: true
+ container:
+ description: Dictionary or list from which to extract a value.
+ type: raw
+ required: true
+ morekeys:
+ description: Indices or keys to extract from the initial result (subkeys/subindices).
+ type: list
+ elements: raw
+ required: false
+
+EXAMPLES: |
+
+ # extracted => 'b', same as ['a', 'b', 'c'][1]
+ extracted: "{{ 1 | extract(['a', 'b', 'c']) }}"
+
+ # extracted_key => '2', same as {'a': 1, 'b': 2, 'c': 3}['b']
+ extracted_key: "{{ 'b' | extract({'a': 1, 'b': 2, 'c': 3}) }}"
+
+ # extracted_key_r => '2', same as [{'a': 1, 'b': 2, 'c': 3}, {'x': 9, 'y': 10}][0]['b']
+ extracted_key_r: "{{ 0 | extract([{'a': 1, 'b': 2, 'c': 3}, {'x': 9, 'y': 10}], morekeys='b') }}"
+
+RETURN:
+ _value:
+ description: The value extracted from the container.
+ type: raw
diff --git a/lib/ansible/plugins/filter/fileglob.yml b/lib/ansible/plugins/filter/fileglob.yml
new file mode 100644
index 0000000..69e8a9b
--- /dev/null
+++ b/lib/ansible/plugins/filter/fileglob.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: fileglob
+ short_description: explode a path glob to matching files
+ description:
+ - Return a list of files that matches the supplied path glob pattern.
+ - Filters run on the controller, so the files are matched from the controller's file system.
+ positional: _input
+ options:
+ _input:
+ description: Path glob pattern.
+ type: string
+ required: true
+
+EXAMPLES: |
+ # found = ['/etc/hosts', '/etc/hasts']
+ found: "{{ '/etc/h?sts' | fileglob }}"
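+
+ # txtfiles => list of all .txt files directly under /tmp on the controller
+ txtfiles: "{{ '/tmp/*.txt' | fileglob }}"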
+
+RETURN:
+ _value:
+ description: List of files matched.
+ type: list
+ elements: string
diff --git a/lib/ansible/plugins/filter/flatten.yml b/lib/ansible/plugins/filter/flatten.yml
new file mode 100644
index 0000000..b909c3d
--- /dev/null
+++ b/lib/ansible/plugins/filter/flatten.yml
@@ -0,0 +1,32 @@
+DOCUMENTATION:
+ name: flatten
+ version_added: "2.5"
+ short_description: flatten lists within a list
+ description:
+ - For a given list, take any elements that are lists and insert their elements into the parent list directly.
+ positional: _input, levels, skip_nulls
+ options:
+ _input:
+ description: The list to flatten.
+ type: list
+ required: true
+ levels:
+ description: Number of recursive list depths to flatten.
+ type: int
+ skip_nulls:
+ description: Skip C(null)/C(None) elements when inserting into the top list.
+ type: bool
+ default: true
+
+EXAMPLES: |
+
+ # [1,2,3,4,5,6]
+ flat: "{{ [1, 2, [3, [4, 5]], 6] | flatten }}"
+
+ # [1,2,3,[4,5],6]
+ flatone: "{{ [1, 2, [3, [4, 5]], 6] | flatten(1) }}"
+
+RETURN:
+ _value:
+ description: The flattened list.
+ type: list
diff --git a/lib/ansible/plugins/filter/from_json.yml b/lib/ansible/plugins/filter/from_json.yml
new file mode 100644
index 0000000..4edc2bd
--- /dev/null
+++ b/lib/ansible/plugins/filter/from_json.yml
@@ -0,0 +1,25 @@
+DOCUMENTATION:
+ name: from_json
+ version_added: 'historical'
+ short_description: Convert JSON string into variable structure
+ description:
+ - Converts a JSON string representation into an equivalent structured Ansible variable.
+ - Ansible automatically converts JSON strings into variable structures in most contexts; use this plugin in contexts where automatic conversion does not happen.
+ notes:
+ - This filter functions as a wrapper to the Python C(json.loads) function.
+ options:
+ _input:
+ description: A JSON string.
+ type: string
+ required: true
+EXAMPLES: |
+ # variable from string variable containing a JSON document
+ {{ docker_config | from_json }}
+
+ # variable from string JSON document
+ {{ '{"a": true, "b": 54, "c": [1,2,3]}' | from_json }}
+
+RETURN:
+ _value:
+ description: The variable resulting from deserialization of the JSON document.
+ type: raw
diff --git a/lib/ansible/plugins/filter/from_yaml.yml b/lib/ansible/plugins/filter/from_yaml.yml
new file mode 100644
index 0000000..e9b1599
--- /dev/null
+++ b/lib/ansible/plugins/filter/from_yaml.yml
@@ -0,0 +1,25 @@
+DOCUMENTATION:
+ name: from_yaml
+ version_added: 'historical'
+ short_description: Convert YAML string into variable structure
+ description:
+ - Converts a YAML string representation into an equivalent structured Ansible variable.
+ - Ansible automatically converts YAML strings into variable structures in most contexts; use this plugin in contexts where automatic conversion does not happen.
+ notes:
+ - This filter functions as a wrapper to the L(Python pyyaml library, https://pypi.org/project/PyYAML/)'s C(yaml.safe_load) function.
+ options:
+ _input:
+ description: A YAML string.
+ type: string
+ required: true
+EXAMPLES: |
+ # variable from string variable containing a YAML document
+ {{ github_workflow | from_yaml }}
+
+ # variable from string JSON document
+ {{ '{"a": true, "b": 54, "c": [1,2,3]}' | from_yaml }}
+
+RETURN:
+ _value:
+ description: The variable resulting from deserializing the YAML document.
+ type: raw
diff --git a/lib/ansible/plugins/filter/from_yaml_all.yml b/lib/ansible/plugins/filter/from_yaml_all.yml
new file mode 100644
index 0000000..b179f1c
--- /dev/null
+++ b/lib/ansible/plugins/filter/from_yaml_all.yml
@@ -0,0 +1,28 @@
+DOCUMENTATION:
+ name: from_yaml_all
+ version_added: 'historical'
+ short_description: Convert a series of YAML documents into a variable structure
+ description:
+ - Converts a string containing one or more YAML documents into the equivalent structured Ansible variables.
+ - Ansible internally auto-converts YAML strings into variable structures in most contexts, but by default does not handle 'multi document' YAML files or strings.
+ - If only a single YAML document is supplied, this is the equivalent of using C(from_yaml).
+ notes:
+ - This filter functions as a wrapper to the Python C(yaml.safe_load_all) function, part of the L(pyyaml Python library, https://pypi.org/project/PyYAML/).
+ - Possible conflicts in variable names from the multiple documents are resolved directly by the pyyaml library.
+ options:
+ _input:
+ description: A YAML string.
+ type: string
+ required: true
+
+EXAMPLES: |
+ # variable from string variable containing YAML documents
+ {{ multidoc_yaml_string | from_yaml_all }}
+
+ # variable from multidocument YAML string
+ {{ '---\n{"a": true, "b": 54, "c": [1,2,3]}\n...\n---\n{"x": 1}\n...\n' | from_yaml_all }}
+
+RETURN:
+ _value:
+ description: The variable resulting from deserializing the YAML documents.
+ type: raw
diff --git a/lib/ansible/plugins/filter/hash.yml b/lib/ansible/plugins/filter/hash.yml
new file mode 100644
index 0000000..0f5f315
--- /dev/null
+++ b/lib/ansible/plugins/filter/hash.yml
@@ -0,0 +1,28 @@
+DOCUMENTATION:
+ name: hash
+ version_added: "1.9"
+ short_description: hash of input data
+ description:
+ - Returns a configurable hash of the input data. Uses L(SHA-1, https://en.wikipedia.org/wiki/SHA-1) by default.
+ positional: _input
+ options:
+ _input:
+ description: Data to checksum.
+ type: raw
+ required: true
+ hashtype:
+ description:
+ - Type of algorithm to produce the hash.
+ - The list of available algorithms depends on the Python C(hashlib) available on the controller.
+ type: string
+ default: sha1
+EXAMPLES: |
+ # sha1_hash => "109f4b3c50d7b0df729d299bc6f8e9ef9066971f"
+ sha1_hash: "{{ 'test2' | hash('sha1') }}"
+ # md5 => "5a105e8b9d40e1329780d62ea2265d8a"
+ md5: "{{ 'test2' | hash('md5') }}"
+
+RETURN:
+ _value:
+ description: The checksum of the input, as configured in I(hashtype).
+ type: string
diff --git a/lib/ansible/plugins/filter/human_readable.yml b/lib/ansible/plugins/filter/human_readable.yml
new file mode 100644
index 0000000..e3028ac
--- /dev/null
+++ b/lib/ansible/plugins/filter/human_readable.yml
@@ -0,0 +1,35 @@
+DOCUMENTATION:
+ name: human_readable
+ version_added: "historical"
+ short_description: Make bytes/bits human readable
+ description:
+ - Convert byte or bit figures to more human readable formats.
+ positional: _input, isbits, unit
+ options:
+ _input:
+ description: Number of bytes, or bits. Depends on I(isbits).
+ type: int
+ required: true
+ isbits:
+ description: Whether the input is bits, instead of bytes.
+ type: bool
+ default: false
+ unit:
+ description: Unit to force the output into. If not specified, the largest fitting unit is used.
+ type: str
+ choices: [ 'Y', 'Z', 'E', 'P', 'T', 'G', 'M', 'K', 'B']
+EXAMPLES: |
+
+ # size => "1.15 GB"
+ size: "{{ 1232345345 | human_readable }}"
+
+ # size_bits => "1.15 Gb"
+ size_bits: "{{ 1232345345 | human_readable(true) }}"
+
+ # size_MB => "1175.26 MB"
+ size_MB: "{{ 1232345345 | human_readable(unit='M') }}"
+
+RETURN:
+ _value:
+ description: Human readable byte or bit size.
+ type: str
diff --git a/lib/ansible/plugins/filter/human_to_bytes.yml b/lib/ansible/plugins/filter/human_to_bytes.yml
new file mode 100644
index 0000000..f03deed
--- /dev/null
+++ b/lib/ansible/plugins/filter/human_to_bytes.yml
@@ -0,0 +1,34 @@
+DOCUMENTATION:
+ name: human_to_bytes
+ version_added: "historical"
+ short_description: Get bytes from string
+ description:
+ - Convert a human readable byte or bit string into a number of bytes.
+ positional: _input, default_unit, isbits
+ options:
+ _input:
+ description: Human readable description of a number of bytes.
+ type: str
+ required: true
+ default_unit:
+ description: Unit to assume when input does not specify it.
+ type: str
+ choices: ['Y', 'Z', 'E', 'P', 'T', 'G', 'M', 'K', 'B']
+ isbits:
+ description: If C(True), force the input to be interpreted as bits; if C(False), as bytes. Otherwise, guess from the unit notation.
+ type: bool
+EXAMPLES: |
+
+ # size => 1234803098
+ size: '{{ "1.15 GB" | human_to_bytes }}'
+
+ # size => 1234803098
+ size: '{{ "1.15" | human_to_bytes(default_unit="G") }}'
+
+ # this is an error, wants bits, got bytes
+ ERROR: '{{ "1.15 GB" | human_to_bytes(isbits=true) }}'
+
+RETURN:
+ _value:
+ description: Integer representing the bytes from the input.
+ type: int
diff --git a/lib/ansible/plugins/filter/intersect.yml b/lib/ansible/plugins/filter/intersect.yml
new file mode 100644
index 0000000..d811eca
--- /dev/null
+++ b/lib/ansible/plugins/filter/intersect.yml
@@ -0,0 +1,35 @@
+DOCUMENTATION:
+ name: intersect
+ author: Brian Coca (@bcoca)
+ version_added: "1.4"
+ short_description: intersection of lists
+ description:
+ - Provide a list with the common elements from other lists.
+ options:
+ _input:
+ description: A list.
+ type: list
+ required: true
+ _second_list:
+ description: A list.
+ type: list
+ required: true
+ seealso:
+ - plugin_type: filter
+ plugin: ansible.builtin.difference
+ - plugin_type: filter
+ plugin: ansible.builtin.symmetric_difference
+ - plugin_type: filter
+ plugin: ansible.builtin.unique
+ - plugin_type: filter
+ plugin: ansible.builtin.union
+EXAMPLES: |
+ # return only the common elements of list1 and list2
+ # list1: [1, 2, 5, 3, 4, 10]
+ # list2: [1, 2, 3, 4, 5, 11, 99]
+ {{ list1 | intersect(list2) }}
+ # => [1, 2, 5, 3, 4]
+RETURN:
+ _value:
+ description: A list with unique elements common to both lists, also known as a set.
+ type: list
diff --git a/lib/ansible/plugins/filter/items2dict.yml b/lib/ansible/plugins/filter/items2dict.yml
new file mode 100644
index 0000000..1352c67
--- /dev/null
+++ b/lib/ansible/plugins/filter/items2dict.yml
@@ -0,0 +1,48 @@
+DOCUMENTATION:
+ name: items2dict
+ author: Ansible core team
+ version_added: "2.7"
+ short_description: Consolidate a list of itemized dictionaries into a dictionary
+ positional: _input, key_name, value_name
+ description:
+ - Takes a list of dicts, each with C(key) and C(value) keys, and transforms the list into a dictionary,
+ effectively as the reverse of R(dict2items,ansible_collections.ansible.builtin.dict2items_filter).
+ options:
+ _input:
+ description:
+ - A list of dictionaries.
+ - Every dictionary must have keys C(key) and C(value).
+ type: list
+ elements: dict
+ required: true
+ key_name:
+ description: The name of the key in the element dictionaries that holds the key to use at destination.
+ type: str
+ default: key
+ value_name:
+ description: The name of the key in the element dictionaries that holds the value to use at destination.
+ type: str
+ default: value
+ seealso:
+ - plugin_type: filter
+ plugin: ansible.builtin.dict2items
+
+EXAMPLES: |
+ # mydict => { "hi": "bye", "ciao": "ciao" }
+ mydict: "{{ [{'key': 'hi', 'value': 'bye'}, {'key': 'ciao', 'value': 'ciao'}] | items2dict }}"
+
+ # The output is a dictionary with two key/value pairs:
+ # Application: payment
+ # Environment: dev
+ vars:
+ tags:
+ - key: Application
+ value: payment
+ - key: Environment
+ value: dev
+ consolidated: "{{ tags | items2dict }}"
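+
+ # custom key/value names, reversing the dict2items example
+ # back_to_dict => { "users": "/etc/passwd", "groups": "/etc/group" }
+ back_to_dict: "{{ [{'file': 'users', 'path': '/etc/passwd'}, {'file': 'groups', 'path': '/etc/group'}] | items2dict(key_name='file', value_name='path') }}"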
+
+RETURN:
+ _value:
+ description: Dictionary with the consolidated key/values.
+ type: dict
diff --git a/lib/ansible/plugins/filter/log.yml b/lib/ansible/plugins/filter/log.yml
new file mode 100644
index 0000000..c7bb704
--- /dev/null
+++ b/lib/ansible/plugins/filter/log.yml
@@ -0,0 +1,33 @@
+DOCUMENTATION:
+ name: log
+ version_added: "1.9"
+ short_description: log of (math operation)
+ description:
+ - Math operation that returns the L(logarithm, https://en.wikipedia.org/wiki/Logarithm) to base N of the input number.
+ - By default, computes the L(natural logarithm, https://en.wikipedia.org/wiki/Natural_logarithm).
+ notes:
+ - This is a passthrough to Python's C(math.log).
+ positional: _input, base
+ options:
+ _input:
+ description: Number to operate on.
+ type: float
+ required: true
+ base:
+ description: Which base to use. Defaults to L(Euler's number, https://en.wikipedia.org/wiki/Euler%27s_number).
+ type: float
+ default: 2.718281828459045
+
+EXAMPLES: |
+
+ # 1.2920296742201791
+ eightlogfive: "{{ 8 | log(5) }}"
+
+ # 0.9030899869919435
+ eightlog10: "{{ 8 | log(10) }}"
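+
+ # natural logarithm (base e, the default) => 2.0794415416798357
+ eightln: "{{ 8 | log }}"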
+
+
+RETURN:
+ _value:
+ description: Resulting number.
+ type: float
diff --git a/lib/ansible/plugins/filter/mandatory.yml b/lib/ansible/plugins/filter/mandatory.yml
new file mode 100644
index 0000000..5addf15
--- /dev/null
+++ b/lib/ansible/plugins/filter/mandatory.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: mandatory
+ version_added: "historical"
+ short_description: make a variable's existence mandatory
+ description:
+ - Depending on context, undefined variables can be ignored or skipped; this filter ensures they force an error instead.
+ positional: _input
+ options:
+ _input:
+ description: Mandatory expression.
+ type: raw
+ required: true
+EXAMPLES: |
+
+ # results in a Filter Error
+ {{ notdefined | mandatory }}
+
+RETURN:
+ _value:
+ description: The input if defined, otherwise an error.
+ type: raw
diff --git a/lib/ansible/plugins/filter/mathstuff.py b/lib/ansible/plugins/filter/mathstuff.py
new file mode 100644
index 0000000..d4b6af7
--- /dev/null
+++ b/lib/ansible/plugins/filter/mathstuff.py
@@ -0,0 +1,252 @@
+# Copyright 2014, Brian Coca <bcoca@ansible.com>
+# Copyright 2017, Ken Celenza <ken@networktocode.com>
+# Copyright 2017, Jason Edelman <jason@networktocode.com>
+# Copyright 2017, Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import itertools
+import math
+
+from collections.abc import Hashable, Mapping, Iterable
+
+from jinja2.filters import pass_environment
+
+from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
+from ansible.module_utils.common.text import formatters
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils._text import to_native, to_text
+from ansible.utils.display import Display
+
+try:
+ from jinja2.filters import do_unique
+ HAS_UNIQUE = True
+except ImportError:
+ HAS_UNIQUE = False
+
+
+display = Display()
+
+
+@pass_environment
+# Use case_sensitive=None as a sentinel value, so we raise an error only when
+# it is explicitly set and cannot be handled (by Jinja2 w/o 'unique' or the fallback version)
+def unique(environment, a, case_sensitive=None, attribute=None):
+
+ def _do_fail(e):
+ if case_sensitive is False or attribute:
+ raise AnsibleFilterError("Jinja2's unique filter failed and we cannot fall back to Ansible's version "
+ "as it does not support the parameters supplied", orig_exc=e)
+
+ error = e = None
+ try:
+ if HAS_UNIQUE:
+ c = list(do_unique(environment, a, case_sensitive=bool(case_sensitive), attribute=attribute))
+ except TypeError as e:
+ error = e
+ _do_fail(e)
+ except Exception as e:
+ error = e
+ _do_fail(e)
+ display.warning('Falling back to Ansible unique filter as Jinja2 one failed: %s' % to_text(e))
+
+ if not HAS_UNIQUE or error:
+
+ # handle Jinja2 specific attributes when using Ansible's version
+ if case_sensitive is False or attribute:
+ raise AnsibleFilterError("Ansible's unique filter does not support case_sensitive=False nor attribute parameters, "
+ "you need a newer version of Jinja2 that provides their version of the filter.")
+
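+ # fallback: keep the first occurrence of each element, preserving input order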
+ c = []
+ for x in a:
+ if x not in c:
+ c.append(x)
+
+ return c
+
+
+@pass_environment
+def intersect(environment, a, b):
+ if isinstance(a, Hashable) and isinstance(b, Hashable):
+ c = set(a) & set(b)
+ else:
+ c = unique(environment, [x for x in a if x in b], True)
+ return c
+
+
+@pass_environment
+def difference(environment, a, b):
+ if isinstance(a, Hashable) and isinstance(b, Hashable):
+ c = set(a) - set(b)
+ else:
+ c = unique(environment, [x for x in a if x not in b], True)
+ return c
+
+
+@pass_environment
+def symmetric_difference(environment, a, b):
+ if isinstance(a, Hashable) and isinstance(b, Hashable):
+ c = set(a) ^ set(b)
+ else:
+ isect = intersect(environment, a, b)
+ c = [x for x in union(environment, a, b) if x not in isect]
+ return c
+
+
+@pass_environment
+def union(environment, a, b):
+ if isinstance(a, Hashable) and isinstance(b, Hashable):
+ c = set(a) | set(b)
+ else:
+ c = unique(environment, a + b, True)
+ return c
+
+
+def logarithm(x, base=math.e):
+ try:
+ if base == 10:
+ return math.log10(x)
+ else:
+ return math.log(x, base)
+ except TypeError as e:
+ raise AnsibleFilterTypeError('log() can only be used on numbers: %s' % to_native(e))
+
+
+def power(x, y):
+ try:
+ return math.pow(x, y)
+ except TypeError as e:
+ raise AnsibleFilterTypeError('pow() can only be used on numbers: %s' % to_native(e))
+
+
+def inversepower(x, base=2):
+ try:
+ if base == 2:
+ return math.sqrt(x)
+ else:
+ return math.pow(x, 1.0 / float(base))
+ except (ValueError, TypeError) as e:
+ raise AnsibleFilterTypeError('root() can only be used on numbers: %s' % to_native(e))
+
+
+def human_readable(size, isbits=False, unit=None):
+ ''' Return a human readable string '''
+ try:
+ return formatters.bytes_to_human(size, isbits, unit)
+ except TypeError as e:
+ raise AnsibleFilterTypeError("human_readable() failed on bad input: %s" % to_native(e))
+ except Exception:
+ raise AnsibleFilterError("human_readable() can't interpret the following string: %s" % size)
+
+
+def human_to_bytes(size, default_unit=None, isbits=False):
+ ''' Return bytes count from a human readable string '''
+ try:
+ return formatters.human_to_bytes(size, default_unit, isbits)
+ except TypeError as e:
+ raise AnsibleFilterTypeError("human_to_bytes() failed on bad input: %s" % to_native(e))
+ except Exception:
+ raise AnsibleFilterError("human_to_bytes() can't interpret the following string: %s" % size)
+
+
+def rekey_on_member(data, key, duplicates='error'):
+ """
+ Rekey a dict of dicts on another member
+
+ May also create a dict from a list of dicts.
+
+ duplicates can be one of ``error`` or ``overwrite`` to specify whether to error out if the key
+ value would be duplicated or to overwrite previous entries if that's the case.
+ """
+ if duplicates not in ('error', 'overwrite'):
+ raise AnsibleFilterError("duplicates parameter to rekey_on_member has unknown value: {0}".format(duplicates))
+
+ new_obj = {}
+
+ # Ensure the positional args are defined - raise jinja2.exceptions.UndefinedError if not
+ bool(data) and bool(key)
+
+ if isinstance(data, Mapping):
+ iterate_over = data.values()
+ elif isinstance(data, Iterable) and not isinstance(data, (text_type, binary_type)):
+ iterate_over = data
+ else:
+ raise AnsibleFilterTypeError("Type is not a valid list, set, or dict")
+
+ for item in iterate_over:
+ if not isinstance(item, Mapping):
+ raise AnsibleFilterTypeError("List item is not a valid dict")
+
+ try:
+ key_elem = item[key]
+ except KeyError:
+ raise AnsibleFilterError("Key {0} was not found".format(key))
+ except TypeError as e:
+ raise AnsibleFilterTypeError(to_native(e))
+ except Exception as e:
+ raise AnsibleFilterError(to_native(e))
+
+ # Note: if new_obj[key_elem] exists it will always be a non-empty dict (it will at
+ # minimum contain {key: key_elem})
+ if new_obj.get(key_elem, None):
+ if duplicates == 'error':
+ raise AnsibleFilterError("Key {0} is not unique, cannot correctly turn into dict".format(key_elem))
+ elif duplicates == 'overwrite':
+ new_obj[key_elem] = item
+ else:
+ new_obj[key_elem] = item
+
+ return new_obj
+
+
+class FilterModule(object):
+ ''' Ansible math jinja2 filters '''
+
+ def filters(self):
+ filters = {
+ # exponents and logarithms
+ 'log': logarithm,
+ 'pow': power,
+ 'root': inversepower,
+
+ # set theory
+ 'unique': unique,
+ 'intersect': intersect,
+ 'difference': difference,
+ 'symmetric_difference': symmetric_difference,
+ 'union': union,
+
+ # combinatorial
+ 'product': itertools.product,
+ 'permutations': itertools.permutations,
+ 'combinations': itertools.combinations,
+
+ # computer theory
+ 'human_readable': human_readable,
+ 'human_to_bytes': human_to_bytes,
+ 'rekey_on_member': rekey_on_member,
+
+ # zip
+ 'zip': zip,
+ 'zip_longest': itertools.zip_longest,
+
+ }
+
+ return filters
diff --git a/lib/ansible/plugins/filter/md5.yml b/lib/ansible/plugins/filter/md5.yml
new file mode 100644
index 0000000..c97870d
--- /dev/null
+++ b/lib/ansible/plugins/filter/md5.yml
@@ -0,0 +1,24 @@
+DOCUMENTATION:
+ name: md5
+ version_added: "historical"
+ short_description: MD5 hash of input data
+ description:
+ - Returns an L(MD5 hash, https://en.wikipedia.org/wiki/MD5) of the input data.
+ positional: _input
+ notes:
+ - This requires the MD5 algorithm to be available on the system; security contexts like FIPS might prevent this.
+ - MD5 has long been deemed insecure and is not recommended for security related uses.
+ options:
+ _input:
+ description: Data to hash.
+ type: raw
+ required: true
+
+EXAMPLES: |
+ # md5hash => "ae2b1fca515949e5d54fb22b8ed95575"
+ md5hash: "{{ 'testing' | md5 }}"
+
+RETURN:
+ _value:
+ description: The MD5 hash of the input.
+ type: string
diff --git a/lib/ansible/plugins/filter/password_hash.yml b/lib/ansible/plugins/filter/password_hash.yml
new file mode 100644
index 0000000..d12efb4
--- /dev/null
+++ b/lib/ansible/plugins/filter/password_hash.yml
@@ -0,0 +1,37 @@
+DOCUMENTATION:
+ name: password_hash
+ version_added: "historical"
+ short_description: convert input password into password_hash
+ description:
+ - Returns a password_hash of a secret.
+ positional: _input
+ notes:
+ - Algorithms available might be restricted by the system.
+ options:
+ _input:
+ description: Secret to hash.
+ type: string
+ required: true
+ hashtype:
+ description: Hashing algorithm to use.
+ type: string
+ default: sha512
+ choices: [ md5, blowfish, sha256, sha512 ]
+ salt:
+ description: Secret string used for the hashing; if none is provided, a random one can be generated.
+ type: str
+ rounds:
+ description: Number of encryption rounds, default varies by algorithm used.
+ type: int
+ ident:
+ description: Algorithm identifier.
+ type: string
+
+EXAMPLES: |
+ # pwdhash => "$6$/bQCntzQ7VrgVcFa$VaMkmevkY1dqrx8neaenUDlVU.6L/.ojRbrnI4ID.yBHU6XON1cB422scCiXfUL5wRucMdLgJU0Fn38uoeBni/"
+ pwdhash: "{{ 'testing' | password_hash }}"
+
+RETURN:
+ _value:
+ description: The resulting password hash.
+ type: string
diff --git a/lib/ansible/plugins/filter/path_join.yml b/lib/ansible/plugins/filter/path_join.yml
new file mode 100644
index 0000000..d50deaa
--- /dev/null
+++ b/lib/ansible/plugins/filter/path_join.yml
@@ -0,0 +1,30 @@
+DOCUMENTATION:
+ name: path_join
+ author: Anthony Bourguignon (@Toniob)
+ version_added: "2.10"
+ short_description: Join one or more path components
+ positional: _input
+ description:
+ - Returns a path obtained by joining one or more path components.
+ options:
+ _input:
+ description: A path, or a list of paths.
+ type: list
+ elements: str
+ required: true
+
+EXAMPLES: |
+
+ # If path == 'foo/bar' and file == 'baz.txt', the result is '/etc/foo/bar/subdir/baz.txt'
+ {{ ('/etc', path, 'subdir', file) | path_join }}
+
+ # equivalent to '/etc/subdir/{{filename}}'
+ wheremyfile: "{{ ['/etc', 'subdir', filename] | path_join }}"
+
+ # trustme => '/etc/apt/trusted.d/mykey.gpg'
+ trustme: "{{ ['/etc', 'apt', 'trusted.d', 'mykey.gpg'] | path_join }}"
+
+RETURN:
+ _value:
+ description: The concatenated path.
+ type: str
diff --git a/lib/ansible/plugins/filter/permutations.yml b/lib/ansible/plugins/filter/permutations.yml
new file mode 100644
index 0000000..6e0202b
--- /dev/null
+++ b/lib/ansible/plugins/filter/permutations.yml
@@ -0,0 +1,26 @@
+DOCUMENTATION:
+ name: permutations
+ version_added: "historical"
+ short_description: permutations from the elements of a list
+ description:
+ - Create a list of the permutations of lists from the elements of a list.
+ - Unlike combinations, in permutations order is significant.
+ positional: _input, list_size
+ options:
+ _input:
+ description: Elements to base the permutations on.
+ type: list
+ required: true
+ list_size:
+ description: The size of the list for each permutation.
+ type: int
+ required: true
+
+EXAMPLES: |
+ # perms_of_two => [ [ 1, 2 ], [ 1, 3 ], [ 1, 4 ], [ 1, 5 ], [ 2, 1 ], [ 2, 3 ], [ 2, 4 ], [ 2, 5 ], [ 3, 1 ], [ 3, 2 ], [ 3, 4 ], [ 3, 5 ], [ 4, 1 ], [ 4, 2 ], [ 4, 3 ], [ 4, 5 ], [ 5, 1 ], [ 5, 2 ], [ 5, 3 ], [ 5, 4 ] ]
+ perms_of_two: "{{ [1,2,3,4,5] | permutations(2) }}"
+
+RETURN:
+ _value:
+ description: List of permutations lists resulting from the supplied elements and list size.
+ type: list
diff --git a/lib/ansible/plugins/filter/pow.yml b/lib/ansible/plugins/filter/pow.yml
new file mode 100644
index 0000000..da2fa42
--- /dev/null
+++ b/lib/ansible/plugins/filter/pow.yml
@@ -0,0 +1,34 @@
+DOCUMENTATION:
+ name: pow
+ version_added: "1.9"
+ short_description: power of (math operation)
+ description:
+ - Math operation that returns the Nth power of the input number, C(X ^ N).
+ notes:
+ - This is a passthrough to Python's C(math.pow).
+ positional: _input, _power
+ options:
+ _input:
+ description: The base.
+ type: float
+ required: true
+ _power:
+ description: Which power (exponent) to use.
+ type: float
+ required: true
+
+EXAMPLES: |
+
+ # => 32768
+ eight_power_five: "{{ 8 | pow(5) }}"
+
+ # 4
+ square_of_2: "{{ 2 | pow(2) }}"
+
+ # me ^ 3
+ cube_me: "{{ me | pow(3) }}"
+
+RETURN:
+ _value:
+ description: Resulting number.
+ type: float
diff --git a/lib/ansible/plugins/filter/product.yml b/lib/ansible/plugins/filter/product.yml
new file mode 100644
index 0000000..5035522
--- /dev/null
+++ b/lib/ansible/plugins/filter/product.yml
@@ -0,0 +1,42 @@
+DOCUMENTATION:
+ name: product
+ version_added: "historical"
+ short_description: cartesian product of lists
+ description:
+ - Combines two lists into one in which each element is a combination of one element from each input list (the cartesian product).
+ - Creates 'nested loops'. Looping over C(listA) and C(listB) is the same as looping over C(listA | product(listB)).
+ notes:
+ - This is a passthrough to Python's C(itertools.product)
+ positional: _input, _additional_lists, repeat
+ options:
+ _input:
+ description: First list.
+ type: list
+ required: true
+ _additional_lists: #TODO: *args, N possible additional lists
+ description: Additional list for the product.
+ type: list
+ required: false
+ repeat:
+ description: Number of times to repeat the product against itself.
+ default: 1
+ type: int
+EXAMPLES: |
+
+ # product => [ [ 1, "a" ], [ 1, "b" ], [ 1, "c" ], [ 2, "a" ], [ 2, "b" ], [ 2, "c" ], [ 3, "a" ], [ 3, "b" ], [ 3, "c" ], [ 4, "a" ], [ 4, "b" ], [ 4, "c" ], [ 5, "a" ], [ 5, "b" ], [ 5, "c" ] ]
+ product: "{{ [1,2,3,4,5] | product(['a', 'b', 'c']) }}"
+
+ # repeat_original => [ [ 1, 1 ], [ 1, 2 ], [ 2, 1 ], [ 2, 2 ] ]
+ repeat_original: "{{ [1,2] | product(repeat=2) }}"
+
+ # repeat_product => [ [ 1, "a", 1, "a" ], [ 1, "a", 1, "b" ], [ 1, "a", 2, "a" ], [ 1, "a", 2, "b" ], [ 1, "b", 1, "a" ], [ 1, "b", 1, "b" ], [ 1, "b", 2, "a" ], [ 1, "b", 2, "b" ], [ 2, "a", 1, "a" ], [ 2, "a", 1, "b" ], [ 2, "a", 2, "a" ], [ 2, "a", 2, "b" ], [ 2, "b", 1, "a" ], [ 2, "b", 1, "b" ], [ 2, "b", 2, "a" ], [ 2, "b", 2, "b" ] ]
+ repeat_product: "{{ [1,2] | product(['a', 'b'], repeat=2) }}"
+
+ # domains => [ 'example.com', 'ansible.com', 'redhat.com' ]
+ domains: "{{ [ 'example', 'ansible', 'redhat'] | product(['com']) | map('join', '.') }}"
+
+RETURN:
+ _value:
+ description: List of lists of combined elements from the input lists.
+ type: list
+ elements: list
diff --git a/lib/ansible/plugins/filter/quote.yml b/lib/ansible/plugins/filter/quote.yml
new file mode 100644
index 0000000..2d621ed
--- /dev/null
+++ b/lib/ansible/plugins/filter/quote.yml
@@ -0,0 +1,23 @@
+DOCUMENTATION:
+ name: quote
+ version_added: "2.10"
+ short_description: shell quoting
+ description:
+ - Quote a string for safe use in a POSIX shell.
+ notes:
+ - This is a passthrough to Python's C(shlex.quote).
+ positional: _input
+ options:
+ _input:
+ description: String to quote.
+ type: str
+ required: true
+
+EXAMPLES: |
+ - name: Run a shell command
+ shell: echo {{ string_value | quote }}
+
+RETURN:
+ _value:
+ description: Quoted string.
+ type: str
diff --git a/lib/ansible/plugins/filter/random.yml b/lib/ansible/plugins/filter/random.yml
new file mode 100644
index 0000000..b72dbb2
--- /dev/null
+++ b/lib/ansible/plugins/filter/random.yml
@@ -0,0 +1,35 @@
+DOCUMENTATION:
+ name: random
+ version_added: "2.6"
+ short_description: random number or list item
+ description:
+ - Use the input to either select a random element of a list or generate a random number.
+ positional: _input, start, step, seed
+ options:
+ _input:
+ description: A number or a list/sequence. A number is the upper bound for random number generation; a list or sequence is the source from which the random element is selected.
+ type: raw
+ required: true
+ start:
+ description: Bottom bound for the random number/element generated.
+ type: int
+ step:
+ description: Restricts the range to increments of this value between I(start) and the upper bound.
+ type: int
+ default: 1
+ seed:
+ description: If specified, use a repeatable pseudo-random selection instead.
+ type: str
+
+EXAMPLES: |
+
+ # can be any item from the list
+ random_item: "{{ ['a','b','c'] | random }}"
+
+ # cron line, select random minute repeatable for each host
+ "{{ 60 | random(seed=inventory_hostname) }} * * * * root /script/from/cron"
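+
+ # random number from 0 to 59
+ random_minute: "{{ 60 | random }}"
+
+ # one of 1, 11, 21, ..., 91 (start=1, step=10)
+ random_step: "{{ 101 | random(start=1, step=10) }}"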
+
+RETURN:
+ _value:
+ description: Random number or list element.
+ type: raw
diff --git a/lib/ansible/plugins/filter/realpath.yml b/lib/ansible/plugins/filter/realpath.yml
new file mode 100644
index 0000000..12687b6
--- /dev/null
+++ b/lib/ansible/plugins/filter/realpath.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: realpath
+ author: darkone23 (@darkone23)
+ version_added: "1.8"
+ short_description: Turn path into real path
+ description:
+ - Resolves/follows symlinks to return the 'real path' from a given path.
+ - Filters always run on the controller, so this path is resolved using the controller's filesystem.
+ options:
+ _input:
+ description: A path.
+ type: path
+ required: true
+EXAMPLES: |
+
+ realpath: "{{ '/path/to/symlink' | realpath }}"
+
+RETURN:
+ _value:
+ description: The canonical path.
+ type: path
diff --git a/lib/ansible/plugins/filter/regex_escape.yml b/lib/ansible/plugins/filter/regex_escape.yml
new file mode 100644
index 0000000..7819909
--- /dev/null
+++ b/lib/ansible/plugins/filter/regex_escape.yml
@@ -0,0 +1,29 @@
+DOCUMENTATION:
+ name: regex_escape
+ version_added: "2.8"
+ short_description: escape regex chars
+ description:
+ - Escape special characters in a string for use in a regular expression.
+ positional: _input, re_type
+ notes:
+ - posix_extended is not implemented yet
+ options:
+ _input:
+ description: String to escape.
+ type: str
+ required: true
+ re_type:
+ description: Which type of escaping to use.
+ type: str
+ default: python
+ choices: [python, posix_basic]
+
+EXAMPLES: |
+
+ # safe_for_regex => '\^f\.\*o\(\.\*\)\$'
+ safe_for_regex: "{{ '^f.*o(.*)$' | regex_escape() }}"
+
+RETURN:
+ _value:
+ description: Escaped string.
+ type: str
diff --git a/lib/ansible/plugins/filter/regex_findall.yml b/lib/ansible/plugins/filter/regex_findall.yml
new file mode 100644
index 0000000..707d6fa
--- /dev/null
+++ b/lib/ansible/plugins/filter/regex_findall.yml
@@ -0,0 +1,37 @@
+DOCUMENTATION:
+ name: regex_findall
+ version_added: "2.0"
+ short_description: extract all regex matches from string
+ description:
+ - Search in a string or extract all the parts of a string matching a regular expression.
+ positional: _input, _regex
+ options:
+ _input:
+ description: String to match against.
+ type: str
+ required: true
+ _regex:
+ description: Regular expression string that defines the match.
+ type: str
+ multiline:
+ description: Search across line endings if C(True), do not otherwise.
+ type: bool
+ default: no
+ ignorecase:
+ description: Force the search to be case insensitive if C(True), case sensitive otherwise.
+ type: bool
+ default: no
+
+EXAMPLES: |
+
+ # all_pirates => ['CAR', 'tar', 'bar']
+ all_pirates: "{{ 'CAR\ntar\nfoo\nbar\n' | regex_findall('^.ar$', multiline=True, ignorecase=True) }}"
+
+ # get_ips => ['8.8.8.8', '8.8.4.4']
+ get_ips: "{{ 'Some DNS servers are 8.8.8.8 and 8.8.4.4' | regex_findall('\\b(?:[0-9]{1,3}\\.){3}[0-9]{1,3}\\b') }}"
+
+RETURN:
+ _value:
+ description: List of matched strings.
+ type: list
+ elements: str
diff --git a/lib/ansible/plugins/filter/regex_replace.yml b/lib/ansible/plugins/filter/regex_replace.yml
new file mode 100644
index 0000000..0277b56
--- /dev/null
+++ b/lib/ansible/plugins/filter/regex_replace.yml
@@ -0,0 +1,46 @@
+DOCUMENTATION:
+ name: regex_replace
+ version_added: "2.0"
+ short_description: replace a string via regex
+ description:
+ - Replace all occurrences of a substring matched by a regular expression with a replacement string, which may reference capture groups.
+ notes:
+ - Maps to Python's C(re.sub).
+ positional: _input, _regex_match, _regex_replace
+ options:
+ _input:
+ description: String to match against.
+ type: str
+ required: true
+ _regex_match:
+ description: Regular expression string that defines the match.
+ type: str
+ required: true
+ _regex_replace:
+ description: Regular expression string that defines the replacement.
+ type: str
+ required: true
+ multiline:
+ description: Search across line endings if C(True), do not otherwise.
+ type: bool
+ default: no
+ ignorecase:
+ description: Force the search to be case insensitive if C(True), case sensitive otherwise.
+ type: bool
+ default: no
+
+EXAMPLES: |
+
+ # whatami => 'able'
+ whatami: "{{ 'ansible' | regex_replace('^a.*i(.*)$', 'a\\1') }}"
+
+ # commalocal => 'localhost, 80'
+ commalocal: "{{ 'localhost:80' | regex_replace('^(?P<host>.+):(?P<port>\\d+)$', '\\g<host>, \\g<port>') }}"
+
+ # piratecomment => '#CAR\n#tar\nfoo\n#bar\n'
+ piratecomment: "{{ 'CAR\ntar\nfoo\nbar\n' | regex_replace('^(.ar)$', '#\\1', multiline=True, ignorecase=True) }}"
+
+RETURN:
+ _value:
+ description: String with substitution (or original if no match).
+ type: str
diff --git a/lib/ansible/plugins/filter/regex_search.yml b/lib/ansible/plugins/filter/regex_search.yml
new file mode 100644
index 0000000..c61efb7
--- /dev/null
+++ b/lib/ansible/plugins/filter/regex_search.yml
@@ -0,0 +1,38 @@
+DOCUMENTATION:
+ name: regex_search
+ version_added: "2.0"
+ short_description: extract regex match from string
+ description:
+ - Search in a string to extract the part that matches the regular expression.
+ notes:
+ - Maps to Python's C(re.search).
+ positional: _input, _regex
+ options:
+ _input:
+ description: String to match against.
+ type: str
+ required: true
+ _regex:
+ description: Regular expression string that defines the match.
+ type: str
+ multiline:
+ description: Search across line endings if C(True), do not otherwise.
+ type: bool
+ default: no
+ ignorecase:
+ description: Force the search to be case insensitive if C(True), case sensitive otherwise.
+ type: bool
+ default: no
+
+EXAMPLES: |
+
+ # db => 'database42'
+ db: "{{ 'server1/database42' | regex_search('database[0-9]+') }}"
+
+ # drinkat => 'BAR'
+ drinkat: "{{ 'foo\nBAR' | regex_search('^bar', multiline=True, ignorecase=True) }}"
+
+RETURN:
+ _value:
+ description: Matched string or empty string if no match.
+ type: str
diff --git a/lib/ansible/plugins/filter/rekey_on_member.yml b/lib/ansible/plugins/filter/rekey_on_member.yml
new file mode 100644
index 0000000..d7470ab
--- /dev/null
+++ b/lib/ansible/plugins/filter/rekey_on_member.yml
@@ -0,0 +1,30 @@
+DOCUMENTATION:
+ name: rekey_on_member
+ version_added: "2.13"
+ short_description: Rekey a list of dicts into a dict using a member
+ positional: _input, _key, duplicates
+ description: Transform a list of dictionaries, or a dictionary of dictionaries, into a dictionary keyed on the value of the given member of each inner dictionary.
+ options:
+ _input:
+ description: Original list or dictionary of dictionaries.
+ type: dict
+ required: yes
+ _key:
+ description: The name of the member in each inner dictionary whose value becomes the new key.
+ type: str
+ required: yes
+ duplicates:
+ description: How to handle duplicates.
+ type: str
+ default: error
+ choices: [overwrite, error]
+
+EXAMPLES: |
+
+ # mydict => {'eigrp': {'state': 'enabled', 'proto': 'eigrp'}, 'ospf': {'state': 'enabled', 'proto': 'ospf'}}
+ mydict: '{{ [{"proto": "eigrp", "state": "enabled"}, {"proto": "ospf", "state": "enabled"}] | rekey_on_member("proto") }}'
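+
+ # duplicates="overwrite" keeps the last item seen instead of raising an error
+ # (mylist here stands for any list of dicts that share a "proto" value)
+ deduped: '{{ mylist | rekey_on_member("proto", duplicates="overwrite") }}'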
+
+RETURN:
+ _value:
+ description: The resulting dictionary.
+ type: dict
diff --git a/lib/ansible/plugins/filter/relpath.yml b/lib/ansible/plugins/filter/relpath.yml
new file mode 100644
index 0000000..47611c7
--- /dev/null
+++ b/lib/ansible/plugins/filter/relpath.yml
@@ -0,0 +1,28 @@
+DOCUMENTATION:
+ name: relpath
+ author: Jakub Jirutka (@jirutka)
+ version_added: "1.7"
+ short_description: Make a path relative
+ positional: _input, start
+ description:
+ - Converts the given path into a path relative to the directory given in I(start).
+ options:
+ _input:
+ description: A path.
+ type: str
+ required: true
+ start:
+ description: The directory the path should be relative to. If not supplied the current working directory will be used.
+ type: str
+
+EXAMPLES: |
+
+ # testing => ../test/me.txt
+ testing: "{{ '/tmp/test/me.txt' | relpath('/tmp/other/') }}"
+ otherrelpath: "{{ mypath | relpath(mydir) }}"
+
+RETURN:
+ _value:
+ description: The relative path.
+ type: str
diff --git a/lib/ansible/plugins/filter/root.yml b/lib/ansible/plugins/filter/root.yml
new file mode 100644
index 0000000..4f52590
--- /dev/null
+++ b/lib/ansible/plugins/filter/root.yml
@@ -0,0 +1,32 @@
+DOCUMENTATION:
+ name: root
+ version_added: "1.9"
+ short_description: root of (math operation)
+ description:
+ - Math operation that returns the Nth root of the input number, C(X ** (1/N)).
+ positional: _input, base
+ options:
+ _input:
+ description: Number to operate on.
+ type: float
+ required: true
+ base:
+ description: Which root to take.
+ type: float
+ default: 2
+
+EXAMPLES: |
+
+ # => 8
+ fiveroot: "{{ 32768 | root(5) }}"
+
+ # 2
+ sqrt_of_2: "{{ 4 | root }}"
+
+ # me ^^ 3
+ cuberoot_me: "{{ me | root(3) }}"
+
+RETURN:
+ _value:
+ description: Resulting number.
+ type: float
diff --git a/lib/ansible/plugins/filter/sha1.yml b/lib/ansible/plugins/filter/sha1.yml
new file mode 100644
index 0000000..f80803b
--- /dev/null
+++ b/lib/ansible/plugins/filter/sha1.yml
@@ -0,0 +1,24 @@
+DOCUMENTATION:
+ name: sha1
+ version_added: "historical"
+ short_description: SHA-1 hash of input data
+ description:
+ - Returns a L(SHA-1 hash, https://en.wikipedia.org/wiki/SHA-1) of the input data.
+ positional: _input
+ notes:
+ - This requires the SHA-1 algorithm to be available on the system; security contexts like FIPS might prevent this.
+ - SHA-1 has been deemed insecure and is not recommended for security related uses.
+ options:
+ _input:
+ description: Data to hash.
+ type: raw
+ required: true
+
+EXAMPLES: |
+ # sha1hash => "dc724af18fbdd4e59189f5fe768a5f8311527050"
+ sha1hash: "{{ 'testing' | sha1 }}"
+
+RETURN:
+ _value:
+ description: The SHA-1 hash of the input.
+ type: string
diff --git a/lib/ansible/plugins/filter/shuffle.yml b/lib/ansible/plugins/filter/shuffle.yml
new file mode 100644
index 0000000..a7c3e7e
--- /dev/null
+++ b/lib/ansible/plugins/filter/shuffle.yml
@@ -0,0 +1,27 @@
+DOCUMENTATION:
+ name: shuffle
+ version_added: "2.6"
+ short_description: randomize a list
+ description:
+ - Take the elements of the input list and return them in a random order.
+ positional: _input
+ options:
+ _input:
+ description: The list to randomize.
+ type: list
+ elements: any
+ required: true
+ seed:
+ description: If specified, use a repeatable pseudo-random order instead.
+ type: str
+
+EXAMPLES: |
+
+ randomized_list: "{{ ['a','b','c'] | shuffle }}"
+ per_host_repeatable: "{{ ['a','b','c'] | shuffle(seed=inventory_hostname) }}"
+
+RETURN:
+ _value:
+ description: The list in random order.
+ type: list
+ elements: any
diff --git a/lib/ansible/plugins/filter/split.yml b/lib/ansible/plugins/filter/split.yml
new file mode 100644
index 0000000..3e7b59e
--- /dev/null
+++ b/lib/ansible/plugins/filter/split.yml
@@ -0,0 +1,32 @@
+DOCUMENTATION:
+ name: split
+ version_added: "historical"
+ short_description: split a string into a list
+ description:
+ - Using Python's text object method C(split), we turn strings into lists via a 'splitting character'.
+ notes:
+ - This is a passthrough to Python's C(str.split).
+ positional: _input, _split_string
+ options:
+ _input:
+ description: A string to split.
+ type: str
+ required: true
+ _split_string:
+ description: A string on which to split the original.
+ type: str
+ default: ' '
+
+EXAMPLES: |
+
+ # listjojo => [ "jojo", "is", "a" ]
+ listjojo: "{{ 'jojo is a' | split }}"
+
+ # listjojocomma => [ "jojo is", "a" ]
+ listjojocomma: "{{ 'jojo is, a' | split(',') }}"
+
+RETURN:
+ _value:
+ description: List of substrings split from the original.
+ type: list
+ elements: str
diff --git a/lib/ansible/plugins/filter/splitext.yml b/lib/ansible/plugins/filter/splitext.yml
new file mode 100644
index 0000000..ea9cbce
--- /dev/null
+++ b/lib/ansible/plugins/filter/splitext.yml
@@ -0,0 +1,30 @@
+DOCUMENTATION:
+ name: splitext
+ author: Matt Martz (@sivel)
+ version_added: "2.0"
+ short_description: split a path into root and file extension
+ positional: _input
+ description:
+ - Returns a list of two, with the elements consisting of filename root and extension.
+ options:
+ _input:
+ description: A path.
+ type: str
+ required: true
+
+EXAMPLES: |
+
+ # gobble => [ '/etc/make', '.conf' ]
+ gobble: "{{ '/etc/make.conf' | splitext }}"
+
+ # file_n_ext => [ 'ansible', 'cfg' ]
+ file_n_ext: "{{ 'ansible.cfg' | splitext }}"
+
+ # hoax => ['/etc//hoasdf/', '']
+ hoax: '{{ "/etc//hoasdf/"|splitext }}'
+
+RETURN:
+ _value:
+ description: A list consisting of root of the path and the extension.
+ type: list
+ elements: str
diff --git a/lib/ansible/plugins/filter/strftime.yml b/lib/ansible/plugins/filter/strftime.yml
new file mode 100644
index 0000000..6cb8874
--- /dev/null
+++ b/lib/ansible/plugins/filter/strftime.yml
@@ -0,0 +1,45 @@
+DOCUMENTATION:
+ name: strftime
+ version_added: "2.4"
+ short_description: date formatting
+ description:
+ - Using Python's C(strftime) function, take a date formatting string and a date/time to create a formatted date.
+ notes:
+ - This is a passthrough to Python's C(strftime).
+ positional: _input, second, utc
+ options:
+ _input:
+ description:
+ - A formatting string following C(strftime) conventions.
+ - See L(the Python documentation, https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior) for a reference.
+ type: str
+ required: true
+ second:
+ description: Datetime in seconds since C(epoch) to format; if not supplied, the current C(gmtime/localtime) is used.
+ type: int
+ utc:
+ description: Whether time supplied is in UTC.
+ type: bool
+ default: false
+
+EXAMPLES: |
+ # Display year-month-day
+ {{ '%Y-%m-%d' | strftime }}
+ # => "2021-03-19"
+
+ # Display hour:min:sec
+ {{ '%H:%M:%S' | strftime }}
+ # => "21:51:04"
+
+ # Use ansible_date_time.epoch fact
+ {{ '%Y-%m-%d %H:%M:%S' | strftime(ansible_date_time.epoch) }}
+ # => "2021-03-19 21:54:09"
+
+ # Use arbitrary epoch value
+ {{ '%Y-%m-%d' | strftime(0) }} # => 1970-01-01
+ {{ '%Y-%m-%d' | strftime(1441357287) }} # => 2015-09-04
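+
+ # Format the current time in UTC instead of local time
+ {{ '%H:%M:%S' | strftime(utc=true) }}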
+
+RETURN:
+ _value:
+ description: A formatted date/time string.
+ type: str
diff --git a/lib/ansible/plugins/filter/subelements.yml b/lib/ansible/plugins/filter/subelements.yml
new file mode 100644
index 0000000..a2d1a94
--- /dev/null
+++ b/lib/ansible/plugins/filter/subelements.yml
@@ -0,0 +1,38 @@
+DOCUMENTATION:
+ name: subelements
+ version_added: "2.7"
+ short_description: returns a product of a list and its elements
+ positional: _input, _subelement, skip_missing
+ description:
+ - This produces a product of an object and the subelement values of that object, similar to the subelements lookup. This lets you specify individual subelements to use in a template I(_input).
+ options:
+ _input:
+ description: Original list.
+ type: list
+ elements: any
+ required: yes
+ _subelement:
+ description: Label of property to extract from original list items.
+ type: str
+ required: yes
+ skip_missing:
+ description: If C(True), ignore missing subelements, otherwise missing subelements generate an error.
+ type: bool
+ default: no
+
+EXAMPLES: |
+ # data
+ users:
+ - groups: [1,2,3]
+ name: lola
+ - name: fernando
+ groups: [2,3,4]
+
+ # users_w_groups => [ [ { "groups": [ 1, 2, 3 ], "name": "lola" }, 1 ], [ { "groups": [ 1, 2, 3 ], "name": "lola" }, 2 ], [ { "groups": [ 1, 2, 3 ], "name": "lola" }, 3 ], [ { "groups": [ 2, 3, 4 ], "name": "fernando" }, 2 ], [ { "groups": [ 2, 3, 4 ], "name": "fernando" }, 3 ], [ { "groups": [ 2, 3, 4 ], "name": "fernando" }, 4 ] ]
+ users_w_groups: "{{ users | subelements('groups', skip_missing=True) }}"
+
+RETURN:
+ _value:
+ description: List made of original list and product of the subelement list.
+ type: list
+ elements: any
diff --git a/lib/ansible/plugins/filter/symmetric_difference.yml b/lib/ansible/plugins/filter/symmetric_difference.yml
new file mode 100644
index 0000000..de4f3c6
--- /dev/null
+++ b/lib/ansible/plugins/filter/symmetric_difference.yml
@@ -0,0 +1,35 @@
+DOCUMENTATION:
+ name: symmetric_difference
+ author: Brian Coca (@bcoca)
+ version_added: "1.4"
+ short_description: different items from two lists
+ description:
+ - Provide a unique list of all the elements unique to each list.
+ options:
+ _input:
+ description: A list.
+ type: list
+ required: true
+ _second_list:
+ description: A list.
+ type: list
+ required: true
+ seealso:
+ - plugin_type: filter
+ plugin: ansible.builtin.difference
+ - plugin_type: filter
+ plugin: ansible.builtin.intersect
+ - plugin_type: filter
+ plugin: ansible.builtin.union
+ - plugin_type: filter
+ plugin: ansible.builtin.unique
+EXAMPLES: |
+ # return the elements of list1 not in list2 and the elements in list2 not in list1
+ # list1: [1, 2, 5, 1, 3, 4, 10]
+ # list2: [1, 2, 3, 4, 5, 11, 99]
+ {{ list1 | symmetric_difference(list2) }}
+ # => [10, 11, 99]
+RETURN:
+ _value:
+ description: A unique list of the elements from two lists that are unique to each one.
+ type: list
diff --git a/lib/ansible/plugins/filter/ternary.yml b/lib/ansible/plugins/filter/ternary.yml
new file mode 100644
index 0000000..50ff767
--- /dev/null
+++ b/lib/ansible/plugins/filter/ternary.yml
@@ -0,0 +1,44 @@
+DOCUMENTATION:
+ name: ternary
+ author: Brian Coca (@bcoca)
+ version_added: '1.9'
+ short_description: Ternary operation filter
+ description:
+ - Return the first value if the input is C(True), the second if C(False).
+ positional: true_val, false_val
+ options:
+ _input:
+ description: A boolean expression that must evaluate to C(True) or C(False).
+ type: bool
+ required: true
+ true_val:
+ description: Value to return if the input is C(True).
+ type: any
+ required: true
+ false_val:
+ description: Value to return if the input is C(False).
+ type: any
+ none_val:
+ description: Value to return if the input is C(None). If not set, C(None) will be treated as C(False).
+ type: any
+ version_added: '2.8'
+ notes:
+ - Vars as values are evaluated even when not returned. This is due to them being evaluated before being passed into the filter.
+
+EXAMPLES: |
+ # set first 10 volumes rw, rest as dp
+ volume_mode: "{{ (item|int < 11)|ternary('rw', 'dp') }}"
+
+ # choose correct vpc subnet id, note that vars as values are evaluated even if not returned
+ vpc_subnet_id: "{{ (ec2_subnet_type == 'public') | ternary(ec2_vpc_public_subnet_id, ec2_vpc_private_subnet_id) }}"
+
+ - name: service-foo, use systemd module unless upstart is present, then use old service module
+ service:
+ state: restarted
+ enabled: yes
+ use: "{{ (ansible_service_mgr == 'upstart') | ternary('service', 'systemd') }}"
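+
+ # none_val (2.8+): 'enabled' may evaluate to true, false, or None here
+ state_label: "{{ enabled | ternary('on', 'off', 'unknown') }}"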
+
+RETURN:
+ _value:
+ description: The value indicated by the input.
+ type: any
diff --git a/lib/ansible/plugins/filter/to_datetime.yml b/lib/ansible/plugins/filter/to_datetime.yml
new file mode 100644
index 0000000..dbd476a
--- /dev/null
+++ b/lib/ansible/plugins/filter/to_datetime.yml
@@ -0,0 +1,35 @@
+DOCUMENTATION:
+ name: to_datetime
+ version_added: "2.4"
+ short_description: Get C(datetime) from string
+ description:
+ - Using the input string, attempt to create a matching Python C(datetime) object.
+ notes:
+ - For a full list of format codes for working with Python date format strings, see
+ L(the Python documentation, https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior).
+ positional: _input
+ options:
+ _input:
+ description: A string containing date time information.
+ type: str
+ required: true
+ format:
+ description: C(strptime) formatted string that describes the expected format of the input string.
+ type: str
+
+EXAMPLES: |
+
+ # Get total amount of seconds between two dates. Default date format is %Y-%m-%d %H:%M:%S but you can pass your own format
+ secsdiff: '{{ (("2016-08-14 20:00:12" | to_datetime) - ("2015-12-25" | to_datetime("%Y-%m-%d"))).total_seconds() }}'
+
+ # Get remaining seconds after delta has been calculated. NOTE: This does NOT convert years, days, hours, and so on to seconds. For that, use total_seconds()
+ {{ (("2016-08-14 20:00:12" | to_datetime) - ("2016-08-14 18:00:00" | to_datetime)).seconds }}
+ # This expression evaluates to "12" and not "132". Delta is 2 hours, 12 seconds
+
+ # get amount of days between two dates. This returns only number of days and discards remaining hours, minutes, and seconds
+ {{ (("2016-08-14 20:00:12" | to_datetime) - ("2015-12-25" | to_datetime('%Y-%m-%d'))).days }}
+
+RETURN:
+ _value:
+ description: C(datetime) object from the represented value.
+ type: raw
diff --git a/lib/ansible/plugins/filter/to_json.yml b/lib/ansible/plugins/filter/to_json.yml
new file mode 100644
index 0000000..6f32d7c
--- /dev/null
+++ b/lib/ansible/plugins/filter/to_json.yml
@@ -0,0 +1,69 @@
+DOCUMENTATION:
+ name: to_json
+ author: core team
+ version_added: 'historical'
+ short_description: Convert variable to JSON string
+ description:
+ - Converts an Ansible variable into a JSON string representation.
+ - This filter functions as a wrapper to the Python C(json.dumps) function.
+ - Ansible internally auto-converts JSON strings into variable structures so this plugin is used to force it into a JSON string.
+ options:
+ _input:
+ description: A variable or expression that returns a data structure.
+ type: raw
+ required: true
+ vault_to_text:
+ description: Toggle to either unvault a vault or create the JSON version of a vaulted object.
+ type: bool
+ default: True
+ version_added: '2.9'
+ preprocess_unsafe:
+ description: Toggle to represent unsafe values directly in JSON or create an unsafe object in JSON.
+ type: bool
+ default: True
+ version_added: '2.9'
+ allow_nan:
+ description: When C(False), strict adherence to float value limits of the JSON specification, so C(nan), C(inf) and C(-inf) values will produce errors.
+ When C(True), JavaScript equivalents will be used (C(NaN), C(Infinity), C(-Infinity)).
+ default: True
+ type: bool
+ check_circular:
+ description: Controls the use of the internal circular reference detection; if disabled, circular references can result in overflow errors.
+ default: True
+ type: bool
+ ensure_ascii:
+ description: Escapes all non ASCII characters.
+ default: True
+ type: bool
+ indent:
+ description: Number of spaces to indent Python structures, mainly used for display to humans.
+ default: 0
+ type: integer
+ separators:
+ description: The C(item) and C(key) separator to be used in the serialized output,
+ default may change depending on I(indent) and Python version.
+ default: "(', ', ': ')"
+ type: tuple
+ skipkeys:
+ description: If C(True), keys that are not basic Python types will be skipped.
+ default: False
+ type: bool
+ sort_keys:
+ description: Affects sorting of dictionary keys.
+ default: False
+ type: bool
+ notes:
+ - Both I(vault_to_text) and I(preprocess_unsafe) defaulted to C(False) between Ansible 2.9 and 2.12.
+ - 'These parameters to C(json.dumps) will be ignored, as they are overridden internally: I(cls), I(default)'
+
+EXAMPLES: |
+ # dump variable in a template to create a JSON document
+ {{ docker_config|to_json }}
+
+ # same as above but 'prettier' (equivalent to to_nice_json filter)
+ {{ docker_config|to_json(indent=4, sort_keys=True) }}
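+
+ # keep non-ASCII characters in the output instead of escaping them
+ {{ docker_config | to_json(ensure_ascii=False) }}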
+
+RETURN:
+ _value:
+ description: The JSON serialized string representing the variable structure inputted.
+ type: string
diff --git a/lib/ansible/plugins/filter/to_nice_json.yml b/lib/ansible/plugins/filter/to_nice_json.yml
new file mode 100644
index 0000000..bedc18b
--- /dev/null
+++ b/lib/ansible/plugins/filter/to_nice_json.yml
@@ -0,0 +1,54 @@
+DOCUMENTATION:
+ name: to_nice_json
+ author: core team
+ version_added: 'historical'
+ short_description: Convert variable to 'nicely formatted' JSON string
+ description:
+ - Converts an Ansible variable into a 'nicely formatted' JSON string representation
+ - This filter functions as a wrapper to the Python C(json.dumps) function.
+ - Ansible automatically converts JSON strings into variable structures so this plugin is used to forcibly retain a JSON string.
+ options:
+ _input:
+ description: A variable or expression that returns a data structure.
+ type: raw
+ required: true
+ vault_to_text:
+ description: Toggle to either unvault a vault or create the JSON version of a vaulted object.
+ type: bool
+ default: True
+ version_added: '2.9'
+ preprocess_unsafe:
+ description: Toggle to represent unsafe values directly in JSON or create an unsafe object in JSON.
+ type: bool
+ default: True
+ version_added: '2.9'
+ allow_nan:
+ description: When C(False), strict adherence to float value limits of the JSON specification, so C(nan), C(inf) and C(-inf) values will produce errors.
+ When C(True), JavaScript equivalents will be used (C(NaN), C(Infinity), C(-Infinity)).
+ default: True
+ type: bool
+ check_circular:
+ description: Controls the use of the internal circular reference detection; if disabled, circular references can result in overflow errors.
+ default: True
+ type: bool
+ ensure_ascii:
+ description: Escapes all non ASCII characters.
+ default: True
+ type: bool
+ skipkeys:
+ description: If C(True), keys that are not basic Python types will be skipped.
+ default: False
+ type: bool
+ notes:
+ - Both I(vault_to_text) and I(preprocess_unsafe) defaulted to C(False) between Ansible 2.9 and 2.12.
+ - 'These parameters to C(json.dumps) will be ignored, they are overridden for internal use: I(cls), I(default), I(indent), I(separators), I(sort_keys).'
+
+EXAMPLES: |
+ # dump variable in a template to create a nicely formatted JSON document
+ {{ docker_config|to_nice_json }}
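+
+ # skip dictionary keys that are not basic Python types instead of raising an error
+ {{ docker_config | to_nice_json(skipkeys=True) }}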
+
+
+RETURN:
+ _value:
+ description: The 'nicely formatted' JSON serialized string representing the variable structure inputted.
+ type: string
diff --git a/lib/ansible/plugins/filter/to_nice_yaml.yml b/lib/ansible/plugins/filter/to_nice_yaml.yml
new file mode 100644
index 0000000..4677a86
--- /dev/null
+++ b/lib/ansible/plugins/filter/to_nice_yaml.yml
@@ -0,0 +1,39 @@
+DOCUMENTATION:
+ name: to_nice_yaml
+ author: core team
+ version_added: 'historical'
+ short_description: Convert variable to a 'nicely formatted' YAML string
+ description:
+ - Converts an Ansible variable into a 'nicely formatted' YAML string representation.
+ - This filter functions as a wrapper to the L(Python PyYAML library, https://pypi.org/project/PyYAML/)'s C(yaml.dump) function.
+ - Ansible internally auto-converts YAML strings into variable structures so this plugin is used to force it into a YAML string.
+ positional: _input
+ options:
+ _input:
+ description: A variable or expression that returns a data structure.
+ type: raw
+ required: true
+ indent:
+ description: Number of spaces to indent Python structures, mainly used for display to humans.
+ type: integer
+ sort_keys:
+ description: Affects sorting of dictionary keys.
+ default: True
+ type: bool
+ #allow_unicode:
+ # description:
+ # type: bool
+ # default: true
+ #default_style=None, canonical=None, width=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None
+ notes:
+ - More options may be available, see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details.
+ - 'These parameters to C(yaml.dump) will be ignored, as they are overridden internally: I(default_flow_style)'
+
+EXAMPLES: |
+ # dump variable in a template to create a YAML document
+ {{ github_workflow | to_nice_yaml }}
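+
+ # same as above but with 8-space indentation
+ {{ github_workflow | to_nice_yaml(indent=8) }}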
+
+RETURN:
+ _value:
+ description: The YAML serialized string representing the variable structure inputted.
+ type: string
diff --git a/lib/ansible/plugins/filter/to_uuid.yml b/lib/ansible/plugins/filter/to_uuid.yml
new file mode 100644
index 0000000..266bf05
--- /dev/null
+++ b/lib/ansible/plugins/filter/to_uuid.yml
@@ -0,0 +1,30 @@
+DOCUMENTATION:
+ name: to_uuid
+ version_added: "2.9"
+ short_description: namespaced UUID generator
+ description:
+ - Use to generate a namespaced Universally Unique Identifier (UUID).
+ positional: _input, namespace
+ options:
+ _input:
+ description: String to use as base for the UUID.
+ type: str
+ required: true
+ namespace:
+ description: UUID namespace to use.
+ type: str
+ default: 361E6D51-FAEC-444A-9079-341386DA8E2E
+
+EXAMPLES: |
+
+ # To create a namespaced UUIDv5
+ uuid: "{{ string | to_uuid(namespace='11111111-2222-3333-4444-555555555555') }}"
+
+
+ # To create a namespaced UUIDv5 using the default Ansible namespace '361E6D51-FAEC-444A-9079-341386DA8E2E'
+ uuid: "{{ string | to_uuid }}"
+
+RETURN:
+ _value:
+ description: Generated UUID.
+ type: string
diff --git a/lib/ansible/plugins/filter/to_yaml.yml b/lib/ansible/plugins/filter/to_yaml.yml
new file mode 100644
index 0000000..2e7be60
--- /dev/null
+++ b/lib/ansible/plugins/filter/to_yaml.yml
@@ -0,0 +1,52 @@
+DOCUMENTATION:
+ name: to_yaml
+ author: core team
+ version_added: 'historical'
+ short_description: Convert variable to YAML string
+ description:
+ - Converts an Ansible variable into a YAML string representation.
+ - This filter functions as a wrapper to the L(Python PyYAML library, https://pypi.org/project/PyYAML/)'s C(yaml.dump) function.
+ - Ansible automatically converts YAML strings into variable structures so this plugin is used to forcibly retain a YAML string.
+ positional: _input
+ options:
+ _input:
+ description: A variable or expression that returns a data structure.
+ type: raw
+ required: true
+ indent:
+ description: Number of spaces to indent Python structures, mainly used for display to humans.
+ type: integer
+ sort_keys:
+ description: Affects sorting of dictionary keys.
+ default: True
+ type: bool
+ notes:
+ - More options may be available, see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details.
+
+ # TODO: find docs for these
+ #allow_unicode:
+ # description:
+ # type: bool
+ # default: true
+ #default_flow_style
+ #default_style
+ #canonical=None,
+ #width=None,
+ #line_break=None,
+ #encoding=None,
+ #explicit_start=None,
+ #explicit_end=None,
+ #version=None,
+ #tags=None
+
+EXAMPLES: |
+ # dump variable in a template to create a YAML document
+ {{ github_workflow | to_yaml }}
+
+ # same as above but 'prettier' (similar to the to_nice_yaml filter)
+ {{ github_workflow | to_yaml(indent=4) }}
+
+RETURN:
+ _value:
+ description: The YAML serialized string representing the variable structure inputted.
+ type: string
diff --git a/lib/ansible/plugins/filter/type_debug.yml b/lib/ansible/plugins/filter/type_debug.yml
new file mode 100644
index 0000000..73f7946
--- /dev/null
+++ b/lib/ansible/plugins/filter/type_debug.yml
@@ -0,0 +1,20 @@
+DOCUMENTATION:
+ name: type_debug
+ author: Adrian Likins (@alikins)
+ version_added: "2.3"
+ short_description: show input data type
+ description:
+ - Returns the equivalent of Python's C(type) function.
+ options:
+ _input:
+ description: Variable or expression of which you want to determine type.
+ type: any
+ required: true
+EXAMPLES: |
+ # get type of 'myvar'
+ {{ myvar | type_debug }}
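+
+ # guard a task on the detected type (YAML lists load as Python 'list')
+ when: myvar | type_debug == 'list'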
+
+RETURN:
+ _value:
+ description: The Python 'type' of the I(_input) provided.
+ type: string
diff --git a/lib/ansible/plugins/filter/union.yml b/lib/ansible/plugins/filter/union.yml
new file mode 100644
index 0000000..d737900
--- /dev/null
+++ b/lib/ansible/plugins/filter/union.yml
@@ -0,0 +1,35 @@
+DOCUMENTATION:
+ name: union
+ author: Brian Coca (@bcoca)
+ version_added: "1.4"
+ short_description: union of lists
+ description:
+ - Provide a unique list of all the elements of two lists.
+ options:
+ _input:
+ description: A list.
+ type: list
+ required: true
+ _second_list:
+ description: A list.
+ type: list
+ required: true
+ seealso:
+ - plugin_type: filter
+ plugin: ansible.builtin.difference
+ - plugin_type: filter
+ plugin: ansible.builtin.intersect
+ - plugin_type: filter
+ plugin: ansible.builtin.symmetric_difference
+ - plugin_type: filter
+ plugin: ansible.builtin.unique
+EXAMPLES: |
+ # return the unique elements of list1 added to list2
+ # list1: [1, 2, 5, 1, 3, 4, 10]
+ # list2: [1, 2, 3, 4, 5, 11, 99]
+ {{ list1 | union(list2) }}
+ # => [1, 2, 5, 3, 4, 10, 11, 99]
+RETURN:
+ _value:
+ description: A unique list of all the elements from both lists.
+ type: list
diff --git a/lib/ansible/plugins/filter/unique.yml b/lib/ansible/plugins/filter/unique.yml
new file mode 100644
index 0000000..c627816
--- /dev/null
+++ b/lib/ansible/plugins/filter/unique.yml
@@ -0,0 +1,30 @@
+DOCUMENTATION:
+ name: unique
+ author: Brian Coca (@bcoca)
+ version_added: "1.4"
+ short_description: set of unique items of a list
+ description:
+ - Creates a list of unique elements (a set) from the provided input list.
+ options:
+ _input:
+ description: A list.
+ type: list
+ required: true
+ seealso:
+ - plugin_type: filter
+ plugin: ansible.builtin.difference
+ - plugin_type: filter
+ plugin: ansible.builtin.intersect
+ - plugin_type: filter
+ plugin: ansible.builtin.symmetric_difference
+ - plugin_type: filter
+ plugin: ansible.builtin.union
+EXAMPLES: |
+ # return only the unique elements of list1
+ # list1: [1, 2, 5, 1, 3, 4, 10]
+ {{ list1 | unique }}
+ # => [1, 2, 5, 3, 4, 10]
+RETURN:
+ _value:
+ description: A list with unique elements, also known as a set.
+ type: list
diff --git a/lib/ansible/plugins/filter/unvault.yml b/lib/ansible/plugins/filter/unvault.yml
new file mode 100644
index 0000000..96a82ca
--- /dev/null
+++ b/lib/ansible/plugins/filter/unvault.yml
@@ -0,0 +1,36 @@
+DOCUMENTATION:
+ name: unvault
+ author: Brian Coca (@bcoca)
+ version_added: "2.12"
+ short_description: Open an Ansible Vault
+ description:
+ - Retrieve your information from an encrypted Ansible Vault.
+ positional: secret
+ options:
+ _input:
+ description: Vault string, or an C(AnsibleVaultEncryptedUnicode) string object.
+ type: string
+ required: true
+ secret:
+ description: Vault secret, the key that lets you open the vault.
+ type: string
+ required: true
+ vault_id:
+ description: Secret identifier, used internally to try to best match a secret when multiple are provided.
+ type: string
+ default: 'filter_default'
+
+EXAMPLES: |
+ # simply decrypt my key from a vault
+ vars:
+ mykey: "{{ myvaultedkey | unvault(passphrase) }}"
+
+ - name: save templated unvaulted data
+ template: src=dump_template_data.j2 dest=/some/key/clear.txt
+ vars:
+ template_data: '{{ secretdata | unvault(vaultsecret) }}'
+
+RETURN:
+ _value:
+ description: The string that was contained in the vault.
+ type: string
diff --git a/lib/ansible/plugins/filter/urldecode.yml b/lib/ansible/plugins/filter/urldecode.yml
new file mode 100644
index 0000000..dd76937
--- /dev/null
+++ b/lib/ansible/plugins/filter/urldecode.yml
@@ -0,0 +1,24 @@
+DOCUMENTATION:
+ name: urldecode
+ version_added: "2.4"
+ short_description: decode percent-encoded sequences
+ description:
+ - Replace percent-encoded (C(%xx)) sequences in the input string with their single-character equivalents.
+ - This filter is a passthrough to the Python C(urllib.parse.unquote_plus) function, so plus signs are also decoded into spaces.
+ positional: _input
+ options:
+ _input:
+ description: URL encoded string to decode.
+ type: str
+ required: true
+
+RETURN:
+ _value:
+ description: The URL decoded string.
+ type: str
+
+EXAMPLES: |
+
+ # decode a percent-encoded string ('%7e' decodes to '~', '+' to a space)
+ {{ '%7e/foo+bar' | urldecode }}
+ # => "~/foo bar"
diff --git a/lib/ansible/plugins/filter/urls.py b/lib/ansible/plugins/filter/urls.py
new file mode 100644
index 0000000..fb7abc6
--- /dev/null
+++ b/lib/ansible/plugins/filter/urls.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from functools import partial
+
+from urllib.parse import unquote_plus
+
+
+class FilterModule(object):
+ ''' Ansible core jinja2 filters '''
+
+ def filters(self):
+ return {
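+ # percent-decode the input; unquote_plus also converts '+' into spaces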
+ 'urldecode': partial(unquote_plus),
+ }
diff --git a/lib/ansible/plugins/filter/urlsplit.py b/lib/ansible/plugins/filter/urlsplit.py
new file mode 100644
index 0000000..cce54bb
--- /dev/null
+++ b/lib/ansible/plugins/filter/urlsplit.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: urlsplit
+ version_added: "2.4"
+ short_description: get components from URL
+ description:
+ - Split a URL into its component parts.
+ positional: _input, query
+ options:
+ _input:
+ description: URL string to split.
+ type: str
+ required: true
+ query:
+ description: Specify a single component to return.
+ type: str
+ choices: ["fragment", "hostname", "netloc", "password", "path", "port", "query", "scheme", "username"]
+'''
+
+EXAMPLES = r'''
+
+ parts: '{{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit }}'
+ # =>
+ # {
+ # "fragment": "fragment",
+ # "hostname": "www.acme.com",
+ # "netloc": "user:password@www.acme.com:9000",
+ # "password": "password",
+ # "path": "/dir/index.html",
+ # "port": 9000,
+ # "query": "query=term",
+ # "scheme": "http",
+ # "username": "user"
+ # }
+
+ hostname: '{{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit("hostname") }}'
+ # => 'www.acme.com'
+
+ query: '{{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit("query") }}'
+ # => 'query=term'
+
+ path: '{{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit("path") }}'
+ # => '/dir/index.html'
+'''
+
+RETURN = r'''
+ _value:
+ description:
+ - A dictionary with components as keyword and their value.
+ - If I(query) is provided, a string or integer will be returned instead, depending on I(query).
+ type: any
+'''
+
+from urllib.parse import urlsplit
+
+from ansible.errors import AnsibleFilterError
+from ansible.utils import helpers
+
+
+def split_url(value, query='', alias='urlsplit'):
+
+ results = helpers.object_to_dict(urlsplit(value), exclude=['count', 'index', 'geturl', 'encode'])
+
+ # If a query is supplied, make sure it's valid then return the results.
+ # If no option is supplied, return the entire dictionary.
+ if query:
+ if query not in results:
+ raise AnsibleFilterError(alias + ': unknown URL component: %s' % query)
+ return results[query]
+ else:
+ return results
+
+
+# ---- Ansible filters ----
+class FilterModule(object):
+ ''' URI filter '''
+
+ def filters(self):
+ return {
+ 'urlsplit': split_url
+ }
diff --git a/lib/ansible/plugins/filter/vault.yml b/lib/ansible/plugins/filter/vault.yml
new file mode 100644
index 0000000..1ad541e
--- /dev/null
+++ b/lib/ansible/plugins/filter/vault.yml
@@ -0,0 +1,48 @@
+DOCUMENTATION:
+ name: vault
+ author: Brian Coca (@bcoca)
+ version_added: "2.12"
+ short_description: vault your secrets
+ description:
+ - Put your information into an encrypted Ansible Vault.
+ positional: secret
+ options:
+ _input:
+ description: Data to vault.
+ type: string
+ required: true
+ secret:
+ description: Vault secret, the key that lets you open the vault.
+ type: string
+ required: true
+ salt:
+ description:
+ - Encryption salt, will be random if not provided.
+ - While providing one makes the resulting encrypted string reproducible, it can lower the security of the vault.
+ type: string
+ vault_id:
+ description: Secret identifier, used internally to try to best match a secret when multiple are provided.
+ type: string
+ default: 'filter_default'
+ wrap_object:
+ description:
+ - This toggle can force the return of an C(AnsibleVaultEncryptedUnicode) string object; when C(False), you get a simple string.
+ - Mostly useful when combining with the C(to_yaml) filter to output the 'inline vault' format.
+ type: bool
+ default: False
+
+EXAMPLES: |
+ # simply encrypt my key in a vault
+ vars:
+ myvaultedkey: "{{ keyrawdata | vault(passphrase) }}"
+
+ - name: save templated vaulted data
+ template: src=dump_template_data.j2 dest=/some/key/vault.txt
+ vars:
+ mysalt: '{{2**256|random(seed=inventory_hostname)}}'
+ template_data: '{{ secretdata|vault(vaultsecret, salt=mysalt) }}'
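+
+ # combine wrap_object with to_yaml to emit the 'inline vault' YAML form
+ # (illustrative; relies on the wrap_object behavior described above)
+ - debug:
+ msg: "{{ secretdata | vault(vaultsecret, wrap_object=True) | to_yaml }}"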
+
+RETURN:
+ _value:
+ description: The vault string that contains the secret data (or C(AnsibleVaultEncryptedUnicode) string object).
+ type: string
diff --git a/lib/ansible/plugins/filter/win_basename.yml b/lib/ansible/plugins/filter/win_basename.yml
new file mode 100644
index 0000000..f89baa5
--- /dev/null
+++ b/lib/ansible/plugins/filter/win_basename.yml
@@ -0,0 +1,24 @@
+DOCUMENTATION:
+ name: win_basename
+ author: ansible core team
+ version_added: "2.0"
+ short_description: Get a Windows path's base name
+ description:
+ - Returns the last name component of a Windows path; the part of the string that C(win_dirname) does not return.
+ options:
+ _input:
+ description: A Windows path.
+ type: str
+ required: true
+ seealso:
+ - plugin_type: filter
+ plugin: ansible.builtin.win_dirname
+EXAMPLES: |
+
+ # To get the last name of a file Windows path, like 'foo.txt' out of 'C:\Users\asdf\foo.txt'
+ {{ mypath | win_basename }}
+
+RETURN:
+ _value:
+ description: The base name from the Windows path provided.
+ type: str
diff --git a/lib/ansible/plugins/filter/win_dirname.yml b/lib/ansible/plugins/filter/win_dirname.yml
new file mode 100644
index 0000000..dbc85c7
--- /dev/null
+++ b/lib/ansible/plugins/filter/win_dirname.yml
@@ -0,0 +1,24 @@
+DOCUMENTATION:
+ name: win_dirname
+ author: ansible core team
+ version_added: "2.0"
+ short_description: Get a Windows path's directory
+ description:
+ - Returns the directory component of a Windows path; the part of the string that C(win_basename) does not return.
+ options:
+ _input:
+ description: A Windows path.
+ type: str
+ required: true
+ seealso:
+ - plugin_type: filter
+ plugin: ansible.builtin.win_basename
+EXAMPLES: |
+
+ # To get the directory from a Windows path, like 'C:\Users\asdf' out of 'C:\Users\asdf\foo.txt'
+ {{ mypath | win_dirname }}
+
+RETURN:
+ _value:
+ description: The directory from the Windows path provided.
+ type: str
diff --git a/lib/ansible/plugins/filter/win_splitdrive.yml b/lib/ansible/plugins/filter/win_splitdrive.yml
new file mode 100644
index 0000000..828d1dd
--- /dev/null
+++ b/lib/ansible/plugins/filter/win_splitdrive.yml
@@ -0,0 +1,29 @@
+DOCUMENTATION:
+ name: win_splitdrive
+ author: ansible core team
+ version_added: "2.0"
+ short_description: Split a Windows path by the drive letter
+ description:
+ - Returns a list with the first component being the drive (letter and colon) and the second, the rest of the path.
+ options:
+ _input:
+ description: A Windows path.
+ type: str
+ required: true
+
+EXAMPLES: |
+
+ # To split a Windows path into its drive and path components, like ['C:', '\Users\asdf\foo.txt'] out of 'C:\Users\asdf\foo.txt'
+ {{ mypath | win_splitdrive }}
+
+ # just the drive letter
+ {{ mypath | win_splitdrive | first }}
+
+ # path w/o drive letter
+ {{ mypath | win_splitdrive | last }}
+
+RETURN:
+ _value:
+ description: List in which the first element is the drive letter and the second the rest of the path.
+ type: list
+ elements: str
diff --git a/lib/ansible/plugins/filter/zip.yml b/lib/ansible/plugins/filter/zip.yml
new file mode 100644
index 0000000..20d7a9b
--- /dev/null
+++ b/lib/ansible/plugins/filter/zip.yml
@@ -0,0 +1,43 @@
+DOCUMENTATION:
+ name: zip
+ version_added: "2.3"
+ short_description: combine list elements
+ positional: _input, _additional_lists
+ description: Iterate over several iterables in parallel, producing tuples with an item from each one.
+ notes:
+ - This is mostly a passthrough to Python's C(zip) function.
+ options:
+ _input:
+ description: Original list.
+ type: list
+ elements: any
+ required: yes
+ _additional_lists:
+ description: Additional list(s).
+ type: list
+ elements: any
+ required: yes
+ strict:
+ description: If C(True), return an error on mismatched list lengths; otherwise, the shortest list determines the output.
+ type: bool
+ default: no
+
+EXAMPLES: |
+
+ # two => [[1, "a"], [2, "b"], [3, "c"], [4, "d"], [5, "e"], [6, "f"]]
+ two: "{{ [1,2,3,4,5,6] | zip(['a','b','c','d','e','f']) }}"
+
+ # three => [ [ 1, "a", "d" ], [ 2, "b", "e" ], [ 3, "c", "f" ] ]
+ three: "{{ [1,2,3] | zip(['a','b','c'], ['d','e','f']) }}"
+
+ # shorter => [[1, "a"], [2, "b"], [3, "c"]]
+ shorter: "{{ [1,2,3] | zip(['a','b','c','d','e','f']) }}"
+
+ # compose dict from lists of keys and values
+ mydict: "{{ dict(keys_list | zip(values_list)) }}"
+
+RETURN:
+ _value:
+ description: List of lists made of elements matching the positions of the input lists.
+ type: list
+ elements: list
diff --git a/lib/ansible/plugins/filter/zip_longest.yml b/lib/ansible/plugins/filter/zip_longest.yml
new file mode 100644
index 0000000..db351b4
--- /dev/null
+++ b/lib/ansible/plugins/filter/zip_longest.yml
@@ -0,0 +1,36 @@
+DOCUMENTATION:
+ name: zip_longest
+ version_added: "2.3"
+ short_description: combine list elements, with filler
+ positional: _input, _additional_lists
+ description:
+ - Make an iterator that aggregates elements from each of the iterables.
+ If the iterables are of uneven length, missing values are filled in with I(fillvalue).
+ Iteration continues until the longest iterable is exhausted.
+ notes:
+ - This is mostly a passthrough to Python's C(itertools.zip_longest) function.
+ options:
+ _input:
+ description: Original list.
+ type: list
+ elements: any
+ required: yes
+ _additional_lists:
+ description: Additional list(s).
+ type: list
+ elements: any
+ required: yes
+ fillvalue:
+ description: Filler value to add to output when one of the lists does not contain enough elements to match the others.
+ type: any
+
+EXAMPLES: |
+
+ # X_fill => [[1, "a", 21], [2, "b", 22], [3, "c", 23], ["X", "d", "X"], ["X", "e", "X"], ["X", "f", "X"]]
+ X_fill: "{{ [1,2,3] | zip_longest(['a','b','c','d','e','f'], [21, 22, 23], fillvalue='X') }}"
+
+RETURN:
+ _value:
+ description: List of lists made of elements matching the positions of the input lists.
+ type: list
+ elements: list
diff --git a/lib/ansible/plugins/httpapi/__init__.py b/lib/ansible/plugins/httpapi/__init__.py
new file mode 100644
index 0000000..0773921
--- /dev/null
+++ b/lib/ansible/plugins/httpapi/__init__.py
@@ -0,0 +1,87 @@
+# (c) 2018 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import abstractmethod
+
+from ansible.plugins import AnsiblePlugin
+
+
+class HttpApiBase(AnsiblePlugin):
+ def __init__(self, connection):
+ super(HttpApiBase, self).__init__()
+
+ self.connection = connection
+ self._become = False
+ self._become_pass = ''
+
+ def set_become(self, become_context):
+ self._become = become_context.become
+ self._become_pass = getattr(become_context, 'become_pass') or ''
+
+ def login(self, username, password):
+ """Call a defined login endpoint to receive an authentication token.
+
+ This should only be implemented if the API has a single endpoint which
+ can turn HTTP basic auth into a token which can be reused for the rest
+ of the calls for the session.
+ """
+ pass
+
+ def logout(self):
+ """ Call to implement session logout.
+
+ Method to clear the session gracefully, for example by revoking any
+ tokens granted during login.
+ """
+ pass
+
+ def update_auth(self, response, response_text):
+ """Return per-request auth token.
+
+ The response should be a dictionary that can be plugged into the
+ headers of a request. The default implementation uses cookie data.
+ If no authentication data is found, return None.
+ """
+ cookie = response.info().get('Set-Cookie')
+ if cookie:
+ return {'Cookie': cookie}
+
+ return None
+
+ def handle_httperror(self, exc):
+ """Overridable method for dealing with HTTP codes.
+
+ This method will attempt to handle known cases of HTTP status codes.
+ If your API uses status codes to convey information in a regular way,
+ you can override this method to handle it appropriately.
+
+ :returns:
+ * True if the code has been handled in a way that the request
+ may be resent without changes.
+ * False if the error cannot be handled or recovered from by the
+ plugin. This will result in the HTTPError being raised as an
+ exception for the caller to deal with as appropriate (most likely
+ by failing).
+ * Any other value returned is taken as a valid response from the
+ server without making another request. In many cases, this can just
+ be the original exception.
+ """
+ if exc.code == 401:
+ if self.connection._auth:
+ # Stored auth appears to be invalid, clear and retry
+ self.connection._auth = None
+ self.login(self.connection.get_option('remote_user'), self.connection.get_option('password'))
+ return True
+ else:
+ # Unauthorized and there's no token. Return an error
+ return False
+
+ return exc
+
+ @abstractmethod
+ def send_request(self, data, **message_kwargs):
+ """Prepares and sends request(s) to device."""
+ pass
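+
+
+# A minimal sketch of a concrete subclass (hypothetical endpoint paths, shown only
+# as an illustration of the base class contract, not a real plugin):
+#
+# import json
+#
+# class HttpApi(HttpApiBase):
+#     def send_request(self, data, **message_kwargs):
+#         # Connection.send() returns the response object and a buffer with its body
+#         response, response_data = self.connection.send(
+#             '/api/v1/command', json.dumps(data), method='POST',
+#             headers={'Content-Type': 'application/json'})
+#         return json.loads(response_data.read())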
diff --git a/lib/ansible/plugins/inventory/__init__.py b/lib/ansible/plugins/inventory/__init__.py
new file mode 100644
index 0000000..c0b4264
--- /dev/null
+++ b/lib/ansible/plugins/inventory/__init__.py
@@ -0,0 +1,463 @@
+# (c) 2017, Red Hat, inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import hashlib
+import os
+import string
+
+from collections.abc import Mapping
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.inventory.group import to_safe_group_name as original_safe
+from ansible.parsing.utils.addresses import parse_address
+from ansible.plugins import AnsiblePlugin
+from ansible.plugins.cache import CachePluginAdjudicator as CacheObject
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.template import Templar
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars, load_extra_vars
+
+display = Display()
+
+
+# Helper methods
+def to_safe_group_name(name):
+ # placeholder for backwards compat
+ return original_safe(name, force=True, silent=True)
+
+
+def detect_range(line=None):
+ '''
+ A helper function that checks a given host line to see if it contains
+ a bracketed range pattern such as db[1:6]-node.
+
+ Returns True if the given line contains a pattern, else False.
+ '''
+ return '[' in line
+
+
+def expand_hostname_range(line=None):
+ '''
+ A helper function that expands a given line containing a bracketed range
+ pattern (for example db[1:6]-node) and returns a list that consists of the
+ expanded hostnames.
+
+ The '[' and ']' characters are used to maintain the pseudo-code
+ appearance. They are replaced in this function with '|' to ease
+ string splitting.
+
+ References: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#hosts-and-groups
+ '''
+ all_hosts = []
+ if line:
+ # A hostname such as db[1:6]-node is considered to consist of
+ # three parts:
+ # head: 'db'
+ # nrange: [1:6]; range() is a built-in. Can't use the name
+ # tail: '-node'
+
+ # Add support for multiple ranges in a host so:
+ # db[01:10:3]node-[01:10]
+ # - to do this we split off at the first [...] set, getting the list
+ # of hosts and then repeat until none left.
+ # - also add an optional third parameter which contains the step. (Default: 1)
+ # so range can be [01:10:2] -> 01 03 05 07 09
+
+ (head, nrange, tail) = line.replace('[', '|', 1).replace(']', '|', 1).split('|')
+ bounds = nrange.split(":")
+ if len(bounds) != 2 and len(bounds) != 3:
+ raise AnsibleError("host range must be begin:end or begin:end:step")
+ beg = bounds[0]
+ end = bounds[1]
+ if len(bounds) == 2:
+ step = 1
+ else:
+ step = bounds[2]
+ if not beg:
+ beg = "0"
+ if not end:
+ raise AnsibleError("host range must specify end value")
+ if beg[0] == '0' and len(beg) > 1:
+ rlen = len(beg) # range length formatting hint
+ if rlen != len(end):
+ raise AnsibleError("host range must specify equal-length begin and end formats")
+
+ def fill(x):
+ return str(x).zfill(rlen) # range sequence
+
+ else:
+ fill = str
+
+ try:
+ i_beg = string.ascii_letters.index(beg)
+ i_end = string.ascii_letters.index(end)
+ if i_beg > i_end:
+ raise AnsibleError("host range must have begin <= end")
+ seq = list(string.ascii_letters[i_beg:i_end + 1:int(step)])
+ except ValueError: # not an alpha range
+ seq = range(int(beg), int(end) + 1, int(step))
+
+ for rseq in seq:
+ hname = ''.join((head, fill(rseq), tail))
+
+ if detect_range(hname):
+ all_hosts.extend(expand_hostname_range(hname))
+ else:
+ all_hosts.append(hname)
+
+ return all_hosts
+
+
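+# Illustrative behavior (string expansion only, shown as a comment):
+#   expand_hostname_range('db[01:03]-node') -> ['db01-node', 'db02-node', 'db03-node']
+#   expand_hostname_range('host[a:c]') -> ['hosta', 'hostb', 'hostc']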
+def get_cache_plugin(plugin_name, **kwargs):
+ try:
+ cache = CacheObject(plugin_name, **kwargs)
+ except AnsibleError as e:
+ if 'fact_caching_connection' in to_native(e):
+ raise AnsibleError("error, '%s' inventory cache plugin requires one of the following to be set "
+ "to a writeable directory path:\nansible.cfg:\n[defaults]: fact_caching_connection,\n"
+ "[inventory]: cache_connection;\nEnvironment:\nANSIBLE_INVENTORY_CACHE_CONNECTION,\n"
+ "ANSIBLE_CACHE_PLUGIN_CONNECTION." % plugin_name)
+ else:
+ raise e
+
+ if plugin_name != 'memory' and kwargs and not getattr(cache._plugin, '_options', None):
+ raise AnsibleError('Unable to use cache plugin {0} for inventory. Cache options were provided but may not reconcile '
+ 'correctly unless set via set_options. Refer to the porting guide if the plugin derives user settings '
+ 'from ansible.constants.'.format(plugin_name))
+ return cache
+
+
+class BaseInventoryPlugin(AnsiblePlugin):
+ """ Parses an Inventory Source"""
+
+ TYPE = 'generator'
+
+ # 3rd party plugins redefine this to
+ # use custom group name sanitization
+ # since constructed features enforce
+ # it by default.
+ _sanitize_group_name = staticmethod(to_safe_group_name)
+
+ def __init__(self):
+
+ super(BaseInventoryPlugin, self).__init__()
+
+ self._options = {}
+ self.inventory = None
+ self.display = display
+ self._vars = {}
+
+ def parse(self, inventory, loader, path, cache=True):
+ ''' Populates inventory from the given data. Raises an error on any parse failure
+ :arg inventory: a copy of the previously accumulated inventory data,
+ to be updated with any new data this plugin provides.
+ The inventory can be empty if no other source/plugin ran successfully.
+ :arg loader: a reference to the DataLoader, which can read in YAML and JSON files,
+ it also has Vault support to automatically decrypt files.
+ :arg path: the string that represents the 'inventory source',
+ normally a path to a configuration file for this inventory,
+ but it can also be a raw string for this plugin to consume
+ :arg cache: a boolean that indicates if the plugin should use the cache or not
+ you can ignore if this plugin does not implement caching.
+ '''
+
+ self.loader = loader
+ self.inventory = inventory
+ self.templar = Templar(loader=loader)
+ self._vars = load_extra_vars(loader)
+
+ def verify_file(self, path):
+ ''' Verify if file is usable by this plugin, base does minimal accessibility check
+ :arg path: a string that was passed as an inventory source,
+ it normally is a path to a config file, but this is not a requirement,
+ it can also be parsed itself as the inventory data to process.
+ So only call this base class if you expect it to be a file.
+ '''
+
+ valid = False
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if (os.path.exists(b_path) and os.access(b_path, os.R_OK)):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not existing or not being readable by the current user')
+ return valid
+
+ def _populate_host_vars(self, hosts, variables, group=None, port=None):
+ if not isinstance(variables, Mapping):
+ raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(variables))
+
+ for host in hosts:
+ self.inventory.add_host(host, group=group, port=port)
+ for k in variables:
+ self.inventory.set_variable(host, k, variables[k])
+
+ def _read_config_data(self, path):
+ ''' validate config and set options as appropriate
+ :arg path: path to common yaml format config file for this plugin
+ '''
+
+ config = {}
+ try:
+ # avoid loader cache so meta: refresh_inventory can pick up config changes
+ # if we read more than once, fs cache should be good enough
+ config = self.loader.load_from_file(path, cache=False)
+ except Exception as e:
+ raise AnsibleParserError(to_native(e))
+
+ # a plugin can be loaded via many different names with redirection; if so, we want to accept any of those names
+ valid_names = getattr(self, '_redirected_names') or [self.NAME]
+
+ if not config:
+ # no data
+ raise AnsibleParserError("%s is empty" % (to_native(path)))
+ elif config.get('plugin') not in valid_names:
+ # this is not my config file
+ raise AnsibleParserError("Incorrect plugin name in file: %s" % config.get('plugin', 'none found'))
+ elif not isinstance(config, Mapping):
+ # configs are dictionaries
+ raise AnsibleParserError('inventory source has invalid structure, it should be a dictionary, got: %s' % type(config))
+
+ self.set_options(direct=config, var_options=self._vars)
+ if 'cache' in self._options and self.get_option('cache'):
+ cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
+ cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
+ self._cache = get_cache_plugin(self.get_option('cache_plugin'), **cache_options)
+
+ return config
+
+ def _consume_options(self, data):
+ ''' update existing options from alternate configuration sources not normally used by Ansible.
+ Many API libraries already have existing configuration sources; this allows the plugin author to leverage them.
+ :arg data: key/value pairs that correspond to configuration options for this plugin
+ '''
+
+ for k in self._options:
+ if k in data:
+ self._options[k] = data.pop(k)
+
+ def _expand_hostpattern(self, hostpattern):
+ '''
+ Takes a single host pattern and returns a list of hostnames and an
+ optional port number that applies to all of them.
+ '''
+ # Can the given hostpattern be parsed as a host with an optional port
+ # specification?
+
+ try:
+ (pattern, port) = parse_address(hostpattern, allow_ranges=True)
+ except Exception:
+ # not a recognizable host pattern
+ pattern = hostpattern
+ port = None
+
+ # Once we have separated the pattern, we expand it into list of one or
+ # more hostnames, depending on whether it contains any [x:y] ranges.
+
+ if detect_range(pattern):
+ hostnames = expand_hostname_range(pattern)
+ else:
+ hostnames = [pattern]
+
+ return (hostnames, port)
+
+
+class BaseFileInventoryPlugin(BaseInventoryPlugin):
+ """ Parses a File based Inventory Source"""
+
+ TYPE = 'storage'
+
+ def __init__(self):
+
+ super(BaseFileInventoryPlugin, self).__init__()
+
+
+class Cacheable(object):
+
+ _cache = CacheObject()
+
+ @property
+ def cache(self):
+ return self._cache
+
+ def load_cache_plugin(self):
+ plugin_name = self.get_option('cache_plugin')
+ cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
+ cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
+ self._cache = get_cache_plugin(plugin_name, **cache_options)
+
+ def get_cache_key(self, path):
+ return "{0}_{1}".format(self.NAME, self._get_cache_prefix(path))
+
+ def _get_cache_prefix(self, path):
+ ''' create predictable unique prefix for plugin/inventory '''
+
+ m = hashlib.sha1()
+ m.update(to_bytes(self.NAME, errors='surrogate_or_strict'))
+ d1 = m.hexdigest()
+
+ n = hashlib.sha1()
+ n.update(to_bytes(path, errors='surrogate_or_strict'))
+ d2 = n.hexdigest()
+
+ return 's_'.join([d1[:5], d2[:5]])
+
+ def clear_cache(self):
+ self._cache.flush()
+
+ def update_cache_if_changed(self):
+ self._cache.update_cache_if_changed()
+
+ def set_cache_plugin(self):
+ self._cache.set_cache()
+
+
+class Constructable(object):
+
+ def _compose(self, template, variables, disable_lookups=True):
+ ''' helper method for plugins to compose variables for Ansible based on jinja2 expression and inventory vars'''
+ t = self.templar
+
+ try:
+ use_extra = self.get_option('use_extra_vars')
+ except Exception:
+ use_extra = False
+
+ if use_extra:
+ t.available_variables = combine_vars(variables, self._vars)
+ else:
+ t.available_variables = variables
+
+ return t.template('%s%s%s' % (t.environment.variable_start_string, template, t.environment.variable_end_string),
+ disable_lookups=disable_lookups)
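+
+ # Illustrative: _compose('var1 + var2', {'var1': 2, 'var2': 3}) wraps the
+ # expression as "{{ var1 + var2 }}" before templating and returns the result.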
+
+ def _set_composite_vars(self, compose, variables, host, strict=False):
+ ''' loops over compose entries to create vars for hosts '''
+ if compose and isinstance(compose, dict):
+ for varname in compose:
+ try:
+ composite = self._compose(compose[varname], variables)
+ except Exception as e:
+ if strict:
+ raise AnsibleError("Could not set %s for host %s: %s" % (varname, host, to_native(e)))
+ continue
+ self.inventory.set_variable(host, varname, composite)
+
+ def _add_host_to_composed_groups(self, groups, variables, host, strict=False, fetch_hostvars=True):
+ ''' helper to create complex groups for plugins based on jinja2 conditionals, hosts that meet the conditional are added to group'''
+ # process each 'group entry'
+ if groups and isinstance(groups, dict):
+ if fetch_hostvars:
+ variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
+ self.templar.available_variables = variables
+ for group_name in groups:
+ conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups[group_name]
+ group_name = self._sanitize_group_name(group_name)
+ try:
+ result = boolean(self.templar.template(conditional))
+ except Exception as e:
+ if strict:
+ raise AnsibleParserError("Could not add host %s to group %s: %s" % (host, group_name, to_native(e)))
+ continue
+
+ if result:
+ # ensure group exists, use sanitized name
+ group_name = self.inventory.add_group(group_name)
+ # add host to group
+ self.inventory.add_child(group_name, host)
+
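+ # Example keyed_groups entry this method consumes, as written in an inventory
+ # source config (illustrative; see also the constructed plugin examples):
+ #   keyed_groups:
+ #     - prefix: distro
+ #       key: ansible_distribution
+ # With the default separator '_', a host whose ansible_distribution is 'CentOS'
+ # is added to the group 'distro_CentOS'.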
+ def _add_host_to_keyed_groups(self, keys, variables, host, strict=False, fetch_hostvars=True):
+ ''' helper to create groups for plugins based on variable values and add the corresponding hosts to it'''
+ if keys and isinstance(keys, list):
+ for keyed in keys:
+ if keyed and isinstance(keyed, dict):
+
+ if fetch_hostvars:
+ variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
+ try:
+ key = self._compose(keyed.get('key'), variables)
+ except Exception as e:
+ if strict:
+ raise AnsibleParserError("Could not generate group for host %s from %s entry: %s" % (host, keyed.get('key'), to_native(e)))
+ continue
+ default_value_name = keyed.get('default_value', None)
+ trailing_separator = keyed.get('trailing_separator')
+ if trailing_separator is not None and default_value_name is not None:
+ raise AnsibleParserError("parameters are mutually exclusive for keyed groups: default_value|trailing_separator")
+ if key or (key == '' and default_value_name is not None):
+ prefix = keyed.get('prefix', '')
+ sep = keyed.get('separator', '_')
+ raw_parent_name = keyed.get('parent_group', None)
+ if raw_parent_name:
+ try:
+ raw_parent_name = self.templar.template(raw_parent_name)
+ except AnsibleError as e:
+ if strict:
+ raise AnsibleParserError("Could not generate parent group %s for group %s: %s" % (raw_parent_name, key, to_native(e)))
+ continue
+
+ new_raw_group_names = []
+ if isinstance(key, string_types):
+ # if key is empty, 'default_value' will be used as group name
+ if key == '' and default_value_name is not None:
+ new_raw_group_names.append(default_value_name)
+ else:
+ new_raw_group_names.append(key)
+ elif isinstance(key, list):
+ for name in key:
+ # if list item is empty, 'default_value' will be used as group name
+ if name == '' and default_value_name is not None:
+ new_raw_group_names.append(default_value_name)
+ else:
+ new_raw_group_names.append(name)
+ elif isinstance(key, Mapping):
+ for (gname, gval) in key.items():
+ bare_name = '%s%s%s' % (gname, sep, gval)
+ if gval == '':
+ # key's value is empty
+ if default_value_name is not None:
+ bare_name = '%s%s%s' % (gname, sep, default_value_name)
+ elif trailing_separator is False:
+ bare_name = gname
+ new_raw_group_names.append(bare_name)
+ else:
+ raise AnsibleParserError("Invalid group name format, expected a string or a list of them or dictionary, got: %s" % type(key))
+
+ for bare_name in new_raw_group_names:
+ if prefix == '' and self.get_option('leading_separator') is False:
+ sep = ''
+ gname = self._sanitize_group_name('%s%s%s' % (prefix, sep, bare_name))
+ result_gname = self.inventory.add_group(gname)
+ self.inventory.add_host(host, result_gname)
+
+ if raw_parent_name:
+ parent_name = self._sanitize_group_name(raw_parent_name)
+ self.inventory.add_group(parent_name)
+ self.inventory.add_child(parent_name, result_gname)
+
+ else:
+ # exclude case of empty list and dictionary, because these are valid constructions
+ # simply no groups need to be constructed, but are still falsy
+ if strict and key not in ([], {}):
+ raise AnsibleParserError("No key or key resulted empty for %s in host %s, invalid entry" % (keyed.get('key'), host))
+ else:
+ raise AnsibleParserError("Invalid keyed group entry, it must be a dictionary: %s " % keyed)
diff --git a/lib/ansible/plugins/inventory/advanced_host_list.py b/lib/ansible/plugins/inventory/advanced_host_list.py
new file mode 100644
index 0000000..1b5d868
--- /dev/null
+++ b/lib/ansible/plugins/inventory/advanced_host_list.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: advanced_host_list
+ version_added: "2.4"
+ short_description: Parses a 'host list' with ranges
+ description:
+ - Parses a host list string as a comma separated list of hosts and supports host ranges.
+ - This plugin only applies to inventory sources that are not paths and contain at least one comma.
+'''
+
+EXAMPLES = '''
+ # simple range
+ # ansible -i 'host[1:10],' -m ping
+
+ # a plain host list without ranges is still supported
+ # ansible-playbook -i 'localhost,' play.yml
+'''
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.inventory import BaseInventoryPlugin
+
+
+class InventoryModule(BaseInventoryPlugin):
+
+ NAME = 'advanced_host_list'
+
+ def verify_file(self, host_list):
+
+ valid = False
+ b_path = to_bytes(host_list, errors='surrogate_or_strict')
+ if not os.path.exists(b_path) and ',' in host_list:
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, host_list, cache=True):
+ ''' parses the inventory file '''
+
+ super(InventoryModule, self).parse(inventory, loader, host_list)
+
+ try:
+ for h in host_list.split(','):
+ h = h.strip()
+ if h:
+ try:
+ (hostnames, port) = self._expand_hostpattern(h)
+ except AnsibleError as e:
+ self.display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
+ hostnames = [h]
+ port = None
+
+ for host in hostnames:
+ if host not in self.inventory.hosts:
+ self.inventory.add_host(host, group='ungrouped', port=port)
+ except Exception as e:
+ raise AnsibleParserError("Invalid data from string, could not parse: %s" % to_native(e))
diff --git a/lib/ansible/plugins/inventory/auto.py b/lib/ansible/plugins/inventory/auto.py
new file mode 100644
index 0000000..45941ca
--- /dev/null
+++ b/lib/ansible/plugins/inventory/auto.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: auto
+ author:
+ - Matt Davis (@nitzmahone)
+ version_added: "2.5"
+ short_description: Loads and executes an inventory plugin specified in a YAML config
+ description:
+ - By enabling the C(auto) inventory plugin, any YAML inventory config file with a
+ C(plugin) key at its root will automatically cause the named plugin to be loaded and executed with that
+ config. This effectively provides automatic enabling of all installed/accessible inventory plugins.
+ - To disable this behavior, remove C(auto) from the C(INVENTORY_ENABLED) config element.
+'''
+
+EXAMPLES = '''
+# This plugin is not intended for direct use; it is a fallback mechanism for automatic enabling of
+# all installed inventory plugins.
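+
+# For example, a YAML inventory source like the following (illustrative only)
+# would cause 'auto' to load and run the 'constructed' inventory plugin:
+#
+#   plugin: ansible.builtin.constructed
+#   strict: false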
+'''
+
+from ansible.errors import AnsibleParserError
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.plugins.loader import inventory_loader
+
+
+class InventoryModule(BaseInventoryPlugin):
+
+ NAME = 'auto'
+
+ def verify_file(self, path):
+ if not path.endswith('.yml') and not path.endswith('.yaml'):
+ return False
+ return super(InventoryModule, self).verify_file(path)
+
+ def parse(self, inventory, loader, path, cache=True):
+ config_data = loader.load_from_file(path, cache=False)
+
+ try:
+ plugin_name = config_data.get('plugin', None)
+ except AttributeError:
+ plugin_name = None
+
+ if not plugin_name:
+ raise AnsibleParserError("no root 'plugin' key found, '{0}' is not a valid YAML inventory plugin config file".format(path))
+
+ plugin = inventory_loader.get(plugin_name)
+
+ if not plugin:
+ raise AnsibleParserError("inventory config '{0}' specifies unknown plugin '{1}'".format(path, plugin_name))
+
+ if not plugin.verify_file(path):
+ raise AnsibleParserError("inventory source '{0}' could not be verified by inventory plugin '{1}'".format(path, plugin_name))
+
+ self.display.v("Using inventory plugin '{0}' to process inventory source '{1}'".format(plugin._load_name, path))
+ plugin.parse(inventory, loader, path, cache=cache)
+ try:
+ plugin.update_cache_if_changed()
+ except AttributeError:
+ pass
diff --git a/lib/ansible/plugins/inventory/constructed.py b/lib/ansible/plugins/inventory/constructed.py
new file mode 100644
index 0000000..dd630c6
--- /dev/null
+++ b/lib/ansible/plugins/inventory/constructed.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: constructed
+ version_added: "2.4"
+ short_description: Uses Jinja2 to construct vars and groups based on existing inventory.
+ description:
+ - Uses a YAML configuration file with a valid YAML or C(.config) extension to define var expressions and group conditionals.
+ - Group conditionals are Jinja2 expressions that qualify a host for membership in a group.
+ - Composed variables are Jinja2 expressions that are calculated and assigned to the hosts.
+ - Only variables already available from previous inventories or the fact cache can be used for templating.
+ - When I(strict) is False, failed expressions will be ignored (assumes vars were missing).
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'constructed' plugin.
+ required: True
+ choices: ['ansible.builtin.constructed', 'constructed']
+ use_vars_plugins:
+ description:
+ - Normally, for performance reasons, vars plugins get executed after the inventory sources complete the base inventory,
+ this option allows for getting vars related to hosts/groups from those plugins.
+ - The host_group_vars (enabled by default) 'vars plugin' is the one responsible for reading host_vars/ and group_vars/ directories.
+ - This will execute all vars plugins, even those that are not supposed to execute at the 'inventory' stage.
+ See vars plugins docs for details on 'stage'.
+ required: false
+ default: false
+ type: boolean
+ version_added: '2.11'
+ extends_documentation_fragment:
+ - constructed
+'''
+
+EXAMPLES = r'''
+ # inventory.config file in YAML format
+ plugin: ansible.builtin.constructed
+ strict: False
+ compose:
+ var_sum: var1 + var2
+
+ # this variable will only be set if I have a persistent fact cache enabled (and have non expired facts)
+ # `strict: False` will skip this instead of producing an error if it is missing facts.
+ server_type: "ansible_hostname | regex_replace ('(.{6})(.{2}).*', '\\2')"
+ groups:
+ # simple name matching
+ webservers: inventory_hostname.startswith('web')
+
+ # using ec2 'tags' (assumes aws inventory)
+ development: "'devel' in (ec2_tags|list)"
+
+ # using other host properties populated in inventory
+ private_only: not (public_dns_name is defined or ip_address is defined)
+
+ # complex group membership
+ multi_group: (group_names | intersect(['alpha', 'beta', 'omega'])) | length >= 2
+
+ keyed_groups:
+ # this creates a group per distro (distro_CentOS, distro_Debian) and assigns the hosts that have matching values to it,
+ # using the default separator "_"
+ - prefix: distro
+ key: ansible_distribution
+
+ # the following examples assume the first inventory is from the `aws_ec2` plugin
+ # this creates a group per ec2 architecture and assign hosts to the matching ones (arch_x86_64, arch_sparc, etc)
+ - prefix: arch
+ key: architecture
+
+ # this creates a group per ec2 region like "us_west_1"
+ - prefix: ""
+ separator: ""
+ key: placement.region
+
+ # this creates a common parent group for all ec2 availability zones
+ - key: placement.availability_zone
+ parent_group: all_ec2_zones
+'''
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleParserError, AnsibleOptionsError
+from ansible.inventory.helpers import get_group_vars
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.module_utils._text import to_native
+from ansible.utils.vars import combine_vars
+from ansible.vars.fact_cache import FactCache
+from ansible.vars.plugins import get_vars_from_inventory_sources
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ """ constructs groups and vars using Jinja2 template expressions """
+
+ NAME = 'constructed'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ self._cache = FactCache()
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+
+ if not ext or ext in ['.config'] + C.YAML_FILENAME_EXTENSIONS:
+ valid = True
+
+ return valid
+
+ def get_all_host_vars(self, host, loader, sources):
+ ''' requires host object '''
+ return combine_vars(self.host_groupvars(host, loader, sources), self.host_vars(host, loader, sources))
+
+ def host_groupvars(self, host, loader, sources):
+ ''' requires host object '''
+ gvars = get_group_vars(host.get_groups())
+
+ if self.get_option('use_vars_plugins'):
+ gvars = combine_vars(gvars, get_vars_from_inventory_sources(loader, sources, host.get_groups(), 'all'))
+
+ return gvars
+
+ def host_vars(self, host, loader, sources):
+ ''' requires host object '''
+ hvars = host.get_vars()
+
+ if self.get_option('use_vars_plugins'):
+ hvars = combine_vars(hvars, get_vars_from_inventory_sources(loader, sources, [host], 'all'))
+
+ return hvars
+
+ def parse(self, inventory, loader, path, cache=False):
+ ''' parses the inventory file '''
+
+ super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
+
+ self._read_config_data(path)
+
+ sources = []
+ try:
+ sources = inventory.processed_sources
+ except AttributeError:
+ if self.get_option('use_vars_plugins'):
+ raise AnsibleOptionsError("The option use_vars_plugins requires ansible >= 2.11.")
+
+ strict = self.get_option('strict')
+ fact_cache = FactCache()
+ try:
+ # Go over hosts (less var copies)
+ for host in inventory.hosts:
+
+ # get available variables to templar
+ hostvars = self.get_all_host_vars(inventory.hosts[host], loader, sources)
+ if host in fact_cache: # adds facts if cache is active
+ hostvars = combine_vars(hostvars, fact_cache[host])
+
+ # create composite vars
+ self._set_composite_vars(self.get_option('compose'), hostvars, host, strict=strict)
+
+ # refetch host vars in case new ones have been created above
+ hostvars = self.get_all_host_vars(inventory.hosts[host], loader, sources)
+ if host in self._cache: # adds facts if cache is active
+ hostvars = combine_vars(hostvars, self._cache[host])
+
+ # constructed groups based on conditionals
+ self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host, strict=strict, fetch_hostvars=False)
+
+ # constructed groups based on variable values
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host, strict=strict, fetch_hostvars=False)
+
+ except Exception as e:
+ raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)), orig_exc=e)
diff --git a/lib/ansible/plugins/inventory/generator.py b/lib/ansible/plugins/inventory/generator.py
new file mode 100644
index 0000000..1955f36
--- /dev/null
+++ b/lib/ansible/plugins/inventory/generator.py
@@ -0,0 +1,135 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: generator
+ version_added: "2.6"
+ short_description: Uses Jinja2 to construct hosts and groups from patterns
+ description:
+ - Uses a YAML configuration file with a valid YAML or C(.config) extension to define var expressions and group conditionals
+ - Create a template pattern that describes each host, and then use independent configuration layers
+ - Every element of every layer is combined to create a host for every layer combination
+ - Parent groups can be defined with reference to hosts and other groups using the same template variables
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'generator' plugin.
+ required: True
+ choices: ['ansible.builtin.generator', 'generator']
+ hosts:
+ description:
+ - The C(name) key is a template used to generate
+ hostnames based on the C(layers) option. Each variable in the name is expanded to create a
+ cartesian product of all possible layer combinations.
+ - The C(parents) are a list of parent groups that the host belongs to. Each C(parent) item
+ contains a C(name) key, again expanded from the template, and an optional C(parents) key
+ that lists its parents.
+ - Parents can also contain C(vars), a dictionary of vars that is
+ then always set for that group. This can provide easy access to the group name, e.g.
+ setting an C(application) variable to the value of the C(application) layer name.
+ layers:
+ description:
+ - A dictionary of layers, with the key being the layer name, used as a variable name in the C(hosts)
+ C(name) and C(parents) keys. Each layer value is a list of possible values for that layer.
+'''
+
+EXAMPLES = '''
+ # inventory.config file in YAML format
+ # remember to enable this inventory plugin in the ansible.cfg before using
+ # View the output using `ansible-inventory -i inventory.config --list`
+ plugin: ansible.builtin.generator
+ hosts:
+ name: "{{ operation }}_{{ application }}_{{ environment }}_runner"
+ parents:
+ - name: "{{ operation }}_{{ application }}_{{ environment }}"
+ parents:
+ - name: "{{ operation }}_{{ application }}"
+ parents:
+ - name: "{{ operation }}"
+ - name: "{{ application }}"
+ - name: "{{ application }}_{{ environment }}"
+ parents:
+ - name: "{{ application }}"
+ vars:
+ application: "{{ application }}"
+ - name: "{{ environment }}"
+ vars:
+ environment: "{{ environment }}"
+ - name: runner
+ layers:
+ operation:
+ - build
+ - launch
+ environment:
+ - dev
+ - test
+ - prod
+ application:
+ - web
+ - api
+'''
+
+import os
+
+from itertools import product
+
+from ansible import constants as C
+from ansible.errors import AnsibleParserError
+from ansible.plugins.inventory import BaseInventoryPlugin
+
+
+class InventoryModule(BaseInventoryPlugin):
+ """ constructs groups and vars using Jinja2 template expressions """
+
+ NAME = 'generator'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+
+ if not ext or ext in ['.config'] + C.YAML_FILENAME_EXTENSIONS:
+ valid = True
+
+ return valid
+
+ def template(self, pattern, variables):
+ self.templar.available_variables = variables
+ return self.templar.do_template(pattern)
+
+ def add_parents(self, inventory, child, parents, template_vars):
+ for parent in parents:
+ try:
+ groupname = self.template(parent['name'], template_vars)
+ except (KeyError, AttributeError, ValueError):
+ raise AnsibleParserError("Element %s has a parent with no name element" % child)
+ if groupname not in inventory.groups:
+ inventory.add_group(groupname)
+ group = inventory.groups[groupname]
+ for (k, v) in parent.get('vars', {}).items():
+ group.set_variable(k, self.template(v, template_vars))
+ inventory.add_child(groupname, child)
+ self.add_parents(inventory, groupname, parent.get('parents', []), template_vars)
+
+ def parse(self, inventory, loader, path, cache=False):
+ ''' parses the inventory file '''
+
+ super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
+
+ config = self._read_config_data(path)
+
+ template_inputs = product(*config['layers'].values())
+ for item in template_inputs:
+ template_vars = dict()
+ for i, key in enumerate(config['layers'].keys()):
+ template_vars[key] = item[i]
+ host = self.template(config['hosts']['name'], template_vars)
+ inventory.add_host(host)
+ self.add_parents(inventory, host, config['hosts'].get('parents', []), template_vars)
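+
+# Editorial sketch of the expansion above, assuming the EXAMPLES config (the
+# names below are illustrative, not part of the module):
+#
+#   from itertools import product
+#   layers = {'operation': ['build', 'launch'],
+#             'environment': ['dev', 'test', 'prod'],
+#             'application': ['web', 'api']}
+#   for item in product(*layers.values()):
+#       template_vars = dict(zip(layers.keys(), item))
+#       # first combination: {'operation': 'build', 'environment': 'dev',
+#       #                     'application': 'web'}
+#       # -> host "build_web_dev_runner"; 2 * 3 * 2 = 12 hosts in total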
diff --git a/lib/ansible/plugins/inventory/host_list.py b/lib/ansible/plugins/inventory/host_list.py
new file mode 100644
index 0000000..eee8516
--- /dev/null
+++ b/lib/ansible/plugins/inventory/host_list.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: host_list
+ version_added: "2.4"
+ short_description: Parses a 'host list' string
+ description:
+ - Parses a host list string as comma-separated values of hosts
+ - This plugin only applies to inventory strings that are not paths and contain a comma.
+'''
+
+EXAMPLES = r'''
+ # define 2 hosts in command line
+ # ansible -i '10.10.2.6, 10.10.2.4' -m ping all
+
+ # DNS resolvable names
+ # ansible -i 'host1.example.com, host2' -m user -a 'name=me state=absent' all
+
+ # just use localhost
+ # ansible-playbook -i 'localhost,' play.yml -c local
+'''
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.parsing.utils.addresses import parse_address
+from ansible.plugins.inventory import BaseInventoryPlugin
+
+
+class InventoryModule(BaseInventoryPlugin):
+
+ NAME = 'host_list'
+
+ def verify_file(self, host_list):
+
+ valid = False
+ b_path = to_bytes(host_list, errors='surrogate_or_strict')
+ if not os.path.exists(b_path) and ',' in host_list:
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, host_list, cache=True):
+ ''' parses the inventory file '''
+
+ super(InventoryModule, self).parse(inventory, loader, host_list)
+
+ try:
+ for h in host_list.split(','):
+ h = h.strip()
+ if h:
+ try:
+ (host, port) = parse_address(h, allow_ranges=False)
+ except AnsibleError as e:
+ self.display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
+ host = h
+ port = None
+
+ if host not in self.inventory.hosts:
+ self.inventory.add_host(host, group='ungrouped', port=port)
+ except Exception as e:
+ raise AnsibleParserError("Invalid data from string, could not parse: %s" % to_native(e))
diff --git a/lib/ansible/plugins/inventory/ini.py b/lib/ansible/plugins/inventory/ini.py
new file mode 100644
index 0000000..b9955cd
--- /dev/null
+++ b/lib/ansible/plugins/inventory/ini.py
@@ -0,0 +1,393 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: ini
+ version_added: "2.4"
+ short_description: Uses an Ansible INI file as inventory source.
+ description:
+ - INI file based inventory; sections are groups, or group-related when using the special C(:modifiers).
+ - Entries in sections C([group_1]) are hosts, members of the group.
+ - Hosts can have variables defined inline as key/value pairs separated by C(=).
+ - The C(children) modifier indicates that the section contains groups.
+ - The C(vars) modifier indicates that the section contains variables assigned to members of the group.
+ - Anything found outside a section is considered an 'ungrouped' host.
+ - Values passed in the INI format using the C(key=value) syntax are interpreted differently depending on where they are declared within your inventory.
+ - When declared inline with the host, INI values are processed by Python's ast.literal_eval function
+ (U(https://docs.python.org/3/library/ast.html#ast.literal_eval)) and interpreted as Python literal structures
+ (strings, numbers, tuples, lists, dicts, booleans, None). If you want a number to be treated as a string, you must quote it.
+ Host lines accept multiple C(key=value) parameters per line.
+ Therefore they need a way to indicate that a space is part of a value rather than a separator.
+ - When declared in a C(:vars) section, INI values are interpreted as strings. For example C(var=FALSE) would create a string equal to C(FALSE).
+ Unlike host lines, C(:vars) sections accept only a single entry per line, so everything after the C(=) must be the value for the entry.
+ - Do not rely on types set during definition, always make sure you specify type with a filter when needed when consuming the variable.
+ - See the Examples for proper quoting to prevent changes to variable type.
+ notes:
+ - Enabled in configuration by default.
+ - Consider switching to YAML format for inventory sources to avoid confusion on the actual type of a variable.
+ The YAML inventory plugin processes variable values consistently and correctly.
+'''
+
+EXAMPLES = '''# fmt: ini
+# Example 1
+[web]
+host1
+host2 ansible_port=222 # defined inline, interpreted as an integer
+
+[web:vars]
+http_port=8080 # all members of 'web' will inherit these
+myvar=23 # defined in a :vars section, interpreted as a string
+
+[web:children] # child groups will automatically add their hosts to parent group
+apache
+nginx
+
+[apache]
+tomcat1
+tomcat2 myvar=34 # host specific vars override group vars
+tomcat3 mysecret="'03#pa33w0rd'" # proper quoting to prevent value changes
+
+[nginx]
+jenkins1
+
+[nginx:vars]
+has_java = True # vars in child groups override same in parent
+
+[all:vars]
+has_java = False # 'all' is 'top' parent
+
+# Example 2
+host1 # this is 'ungrouped'
+
+# both hosts have same IP but diff ports, also 'ungrouped'
+host2 ansible_host=127.0.0.1 ansible_port=44
+host3 ansible_host=127.0.0.1 ansible_port=45
+
+[g1]
+host4
+
+[g2]
+host4 # same host as above, but member of 2 groups, will inherit vars from both
+ # inventory hostnames are unique
+'''
+
+import ast
+import re
+
+from ansible.inventory.group import to_safe_group_name
+from ansible.plugins.inventory import BaseFileInventoryPlugin
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.utils.shlex import shlex_split
+
+
+class InventoryModule(BaseFileInventoryPlugin):
+ """
+ Takes an INI-format inventory file and builds a list of groups and subgroups
+ with their associated hosts and variable settings.
+ """
+ NAME = 'ini'
+ _COMMENT_MARKERS = frozenset((u';', u'#'))
+ b_COMMENT_MARKERS = frozenset((b';', b'#'))
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ self.patterns = {}
+ self._filename = None
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self._filename = path
+
+ try:
+ # Read in the hosts, groups, and variables defined in the inventory file.
+ if self.loader:
+ (b_data, private) = self.loader._get_file_contents(path)
+ else:
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ with open(b_path, 'rb') as fh:
+ b_data = fh.read()
+
+ try:
+ # Faster to do to_text once on a long string than many
+ # times on smaller strings
+ data = to_text(b_data, errors='surrogate_or_strict').splitlines()
+ except UnicodeError:
+ # Handle non-utf8 in comment lines: https://github.com/ansible/ansible/issues/17593
+ data = []
+ for line in b_data.splitlines():
+ if line and line[0] in self.b_COMMENT_MARKERS:
+ # Replace is okay for comment lines
+ # data.append(to_text(line, errors='surrogate_then_replace'))
+ # Currently we only need these lines for accurate lineno in errors
+ data.append(u'')
+ else:
+ # Non-comment lines still have to be valid utf-8
+ data.append(to_text(line, errors='surrogate_or_strict'))
+
+ self._parse(path, data)
+ except Exception as e:
+ raise AnsibleParserError(e)
+
+ def _raise_error(self, message):
+ raise AnsibleError("%s:%d: " % (self._filename, self.lineno) + message)
+
+ def _parse(self, path, lines):
+ '''
+ Populates self.groups from the given array of lines. Raises an error on
+ any parse failure.
+ '''
+
+ self._compile_patterns()
+
+ # We behave as though the first line of the inventory is '[ungrouped]',
+ # and begin to look for host definitions. We make a single pass through
+ # each line of the inventory, building up self.groups and adding hosts,
+ # subgroups, and setting variables as we go.
+
+ pending_declarations = {}
+ groupname = 'ungrouped'
+ state = 'hosts'
+ self.lineno = 0
+ for line in lines:
+ self.lineno += 1
+
+ line = line.strip()
+ # Skip empty lines and comments
+ if not line or line[0] in self._COMMENT_MARKERS:
+ continue
+
+ # Is this a [section] header? That tells us what group we're parsing
+ # definitions for, and what kind of definitions to expect.
+
+ m = self.patterns['section'].match(line)
+ if m:
+ (groupname, state) = m.groups()
+
+ groupname = to_safe_group_name(groupname)
+
+ state = state or 'hosts'
+ if state not in ['hosts', 'children', 'vars']:
+ title = ":".join(m.groups())
+ self._raise_error("Section [%s] has unknown type: %s" % (title, state))
+
+ # If we haven't seen this group before, we add a new Group.
+ if groupname not in self.inventory.groups:
+ # Either [groupname] or [groupname:children] is sufficient to declare a group,
+ # but [groupname:vars] is allowed only if the group is declared elsewhere.
+ # We add the group anyway, but make a note in pending_declarations to check at the end.
+ #
+ # It's possible that a group is previously pending due to being defined as a child
+ # group, in that case we simply pass so that the logic below to process pending
+ # declarations will take the appropriate action for a pending child group instead of
+ # incorrectly handling it as a var state pending declaration
+ if state == 'vars' and groupname not in pending_declarations:
+ pending_declarations[groupname] = dict(line=self.lineno, state=state, name=groupname)
+
+ self.inventory.add_group(groupname)
+
+ # When we see a declaration that we've been waiting for, we process and delete.
+ if groupname in pending_declarations and state != 'vars':
+ if pending_declarations[groupname]['state'] == 'children':
+ self._add_pending_children(groupname, pending_declarations)
+ elif pending_declarations[groupname]['state'] == 'vars':
+ del pending_declarations[groupname]
+
+ continue
+ elif line.startswith('[') and line.endswith(']'):
+ self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + " " +
+ "in the section entry, and that there are no other invalid characters")
+
+ # It's not a section, so the current state tells us what kind of
+ # definition it must be. The individual parsers will raise an
+ # error if we feed them something they can't digest.
+
+ # [groupname] contains host definitions that must be added to
+ # the current group.
+ if state == 'hosts':
+ hosts, port, variables = self._parse_host_definition(line)
+ self._populate_host_vars(hosts, variables, groupname, port)
+
+ # [groupname:vars] contains variable definitions that must be
+ # applied to the current group.
+ elif state == 'vars':
+ (k, v) = self._parse_variable_definition(line)
+ self.inventory.set_variable(groupname, k, v)
+
+ # [groupname:children] contains subgroup names that must be
+ # added as children of the current group. The subgroup names
+ # must themselves be declared as groups, but as before, they
+ # may only be declared later.
+ elif state == 'children':
+ child = self._parse_group_name(line)
+ if child not in self.inventory.groups:
+ if child not in pending_declarations:
+ pending_declarations[child] = dict(line=self.lineno, state=state, name=child, parents=[groupname])
+ else:
+ pending_declarations[child]['parents'].append(groupname)
+ else:
+ self.inventory.add_child(groupname, child)
+ else:
+ # This can happen only if the state checker accepts a state that isn't handled above.
+ self._raise_error("Entered unhandled state: %s" % (state))
+
+ # Any entries in pending_declarations not removed by a group declaration above mean that there was an unresolved reference.
+ # We report only the first such error here.
+ for g in pending_declarations:
+ decl = pending_declarations[g]
+ if decl['state'] == 'vars':
+ raise AnsibleError("%s:%d: Section [%s:vars] not valid for undefined group: %s" % (path, decl['line'], decl['name'], decl['name']))
+ elif decl['state'] == 'children':
+ raise AnsibleError("%s:%d: Section [%s:children] includes undefined group: %s" % (path, decl['line'], decl['parents'].pop(), decl['name']))
+
+ def _add_pending_children(self, group, pending):
+ for parent in pending[group]['parents']:
+ self.inventory.add_child(parent, group)
+ if parent in pending and pending[parent]['state'] == 'children':
+ self._add_pending_children(parent, pending)
+ del pending[group]
+
+ def _parse_group_name(self, line):
+ '''
+ Takes a single line and tries to parse it as a group name. Returns the
+ group name if successful, or raises an error.
+ '''
+
+ m = self.patterns['groupname'].match(line)
+ if m:
+ return m.group(1)
+
+ self._raise_error("Expected group name, got: %s" % (line))
+
+ def _parse_variable_definition(self, line):
+ '''
+ Takes a string and tries to parse it as a variable definition. Returns
+ the key and value if successful, or raises an error.
+ '''
+
+ # TODO: We parse variable assignments as a key (anything to the left of
+ # an '='"), an '=', and a value (anything left) and leave the value to
+ # _parse_value to sort out. We should be more systematic here about
+ # defining what is acceptable, how quotes work, and so on.
+
+ if '=' in line:
+ (k, v) = [e.strip() for e in line.split("=", 1)]
+ return (k, self._parse_value(v))
+
+ self._raise_error("Expected key=value, got: %s" % (line))
+
+ def _parse_host_definition(self, line):
+ '''
+ Takes a single line and tries to parse it as a host definition. Returns
+ a list of Hosts if successful, or raises an error.
+ '''
+
+ # A host definition comprises (1) a non-whitespace hostname or range,
+ # optionally followed by (2) a series of key="some value" assignments.
+ # We ignore any trailing whitespace and/or comments. For example, here
+ # are a series of host definitions in a group:
+ #
+ # [groupname]
+ # alpha
+ # beta:2345 user=admin # we'll tell shlex
+ # gamma sudo=True user=root # to ignore comments
+
+ try:
+ tokens = shlex_split(line, comments=True)
+ except ValueError as e:
+ self._raise_error("Error parsing host definition '%s': %s" % (line, e))
+
+ (hostnames, port) = self._expand_hostpattern(tokens[0])
+
+ # Try to process anything remaining as a series of key=value pairs.
+ variables = {}
+ for t in tokens[1:]:
+ if '=' not in t:
+ self._raise_error("Expected key=value host variable assignment, got: %s" % (t))
+ (k, v) = t.split('=', 1)
+ variables[k] = self._parse_value(v)
+
+ return hostnames, port, variables
+
+ def _expand_hostpattern(self, hostpattern):
+ '''
+ Do some extra checks over and above the normal host pattern processing.
+ '''
+
+ hostnames, port = super(InventoryModule, self)._expand_hostpattern(hostpattern)
+
+ if hostpattern.strip().endswith(':') and port is None:
+ raise AnsibleParserError("Invalid host pattern '%s' supplied, ending in ':' is not allowed, this character is reserved to provide a port." %
+ hostpattern)
+ for pattern in hostnames:
+ # some YAML parsing prevention checks
+ if pattern.strip() == '---':
+ raise AnsibleParserError("Invalid host pattern '%s' supplied, '---' is normally a sign this is a YAML file." % hostpattern)
+
+ return (hostnames, port)
+
+ @staticmethod
+ def _parse_value(v):
+ '''
+ Attempt to transform the string value from an ini file into a basic python object
+ (int, dict, list, unicode string, etc).
+ '''
+ try:
+ v = ast.literal_eval(v)
+ # Using explicit exceptions.
+ # Likely a string that literal_eval does not like. We will then just set it.
+ except ValueError:
+ # For some reason this was thought to be malformed.
+ pass
+ except SyntaxError:
+ # Is this a hash with an equals at the end?
+ pass
+ return to_text(v, nonstring='passthru', errors='surrogate_or_strict')
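+
+ # Editorial examples of the coercion above (doctest-style, not part of the
+ # module):
+ # _parse_value('42') -> 42 (int)
+ # _parse_value('[1, 2]') -> [1, 2] (list)
+ # _parse_value('FALSE') -> u'FALSE' (literal_eval raises ValueError, kept as text)
+ # _parse_value("'03#pa33w0rd'") -> u'03#pa33w0rd' (quoting preserves the string)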
+
+ def _compile_patterns(self):
+ '''
+ Compiles the regular expressions required to parse the inventory and
+ stores them in self.patterns.
+ '''
+
+ # Section names are square-bracketed expressions at the beginning of a
+ # line, comprising (1) a group name optionally followed by (2) a tag
+ # that specifies the contents of the section. We ignore any trailing
+ # whitespace and/or comments. For example:
+ #
+ # [groupname]
+ # [somegroup:vars]
+ # [naughty:children] # only get coal in their stockings
+
+ self.patterns['section'] = re.compile(
+ to_text(r'''^\[
+ ([^:\]\s]+) # group name (see groupname below)
+ (?::(\w+))? # optional : and tag name
+ \]
+ \s* # ignore trailing whitespace
+ (?:\#.*)? # and/or a comment till the
+ $ # end of the line
+ ''', errors='surrogate_or_strict'), re.X
+ )
+
+ # FIXME: What are the real restrictions on group names, or rather, what
+ # should they be? At the moment, they must be non-empty sequences of non
+ # whitespace characters excluding ':' and ']', but we should define more
+ # precise rules in order to support better diagnostics.
+
+ self.patterns['groupname'] = re.compile(
+ to_text(r'''^
+ ([^:\]\s]+)
+ \s* # ignore trailing whitespace
+ (?:\#.*)? # and/or a comment till the
+ $ # end of the line
+ ''', errors='surrogate_or_strict'), re.X
+ )
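+
+ # Editorial examples of what the section pattern accepts (not part of the
+ # module):
+ # '[web]' -> ('web', None), state defaults to 'hosts'
+ # '[web:vars] # comment' -> ('web', 'vars')
+ # '[app servers]' -> no match; the invalid-section error above is raised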
diff --git a/lib/ansible/plugins/inventory/script.py b/lib/ansible/plugins/inventory/script.py
new file mode 100644
index 0000000..4ffd8e1
--- /dev/null
+++ b/lib/ansible/plugins/inventory/script.py
@@ -0,0 +1,196 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: script
+ version_added: "2.4"
+ short_description: Executes an inventory script that returns JSON
+ options:
+ always_show_stderr:
+ description: Toggle display of stderr even when script was successful
+ version_added: "2.5.1"
+ default: True
+ type: boolean
+ ini:
+ - section: inventory_plugin_script
+ key: always_show_stderr
+ env:
+ - name: ANSIBLE_INVENTORY_PLUGIN_SCRIPT_STDERR
+ description:
+ - The source provided must be an executable that returns Ansible inventory JSON
+ - The source must accept C(--list) and C(--host <hostname>) as arguments.
+ C(--host) will only be used if no C(_meta) key is present.
+ This is a performance optimization as the script would be called per host otherwise.
+ notes:
+ - Enabled in configuration by default.
+ - The plugin does not cache results because external inventory scripts are responsible for their own caching.
+'''
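+
+# Editorial sketch of the JSON shape a script is expected to emit for --list
+# (group/host names are illustrative):
+#
+#   {
+#       "web": {"hosts": ["host1"], "vars": {"http_port": 8080},
+#               "children": ["nginx"]},
+#       "_meta": {"hostvars": {"host1": {"ansible_host": "10.0.0.1"}}}
+#   }
+#
+# With "_meta.hostvars" present, --host is never called per host.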
+
+import os
+import subprocess
+
+from collections.abc import Mapping
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.basic import json_dict_bytes_to_unicode
+from ansible.module_utils._text import to_native, to_text
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin):
+ ''' Host inventory parser for ansible using external inventory scripts. '''
+
+ NAME = 'script'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ self._hosts = set()
+
+ def verify_file(self, path):
+ ''' Verify if file is usable by this plugin, base does minimal accessibility check '''
+
+ valid = super(InventoryModule, self).verify_file(path)
+
+ if valid:
+ # not only accessible, file must be executable and/or have shebang
+ shebang_present = False
+ try:
+ with open(path, 'rb') as inv_file:
+ initial_chars = inv_file.read(2)
+ if initial_chars.startswith(b'#!'):
+ shebang_present = True
+ except Exception:
+ pass
+
+ if not os.access(path, os.X_OK) and not shebang_present:
+ valid = False
+
+ return valid
+
+ def parse(self, inventory, loader, path, cache=None):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self.set_options()
+
+ # Support inventory scripts that are not prefixed with some
+ # path information but happen to be in the current working
+ # directory when '.' is not in PATH.
+ cmd = [path, "--list"]
+
+ try:
+ try:
+ sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except OSError as e:
+ raise AnsibleParserError("problem running %s (%s)" % (' '.join(cmd), to_native(e)))
+ (stdout, stderr) = sp.communicate()
+
+ path = to_native(path)
+ err = to_native(stderr or "")
+
+ if err and not err.endswith('\n'):
+ err += '\n'
+
+ if sp.returncode != 0:
+ raise AnsibleError("Inventory script (%s) had an execution error: %s " % (path, err))
+
+ # make sure script output is unicode so that json loader will output unicode strings itself
+ try:
+ data = to_text(stdout, errors="strict")
+ except Exception as e:
+ raise AnsibleError("Inventory {0} contained characters that cannot be interpreted as UTF-8: {1}".format(path, to_native(e)))
+
+ try:
+ processed = self.loader.load(data, json_only=True)
+ except Exception as e:
+ raise AnsibleError("failed to parse executable inventory script results from {0}: {1}\n{2}".format(path, to_native(e), err))
+
+ # if no other errors happened and you want to force displaying stderr, do so now
+ if stderr and self.get_option('always_show_stderr'):
+ self.display.error(msg=to_text(err))
+
+ if not isinstance(processed, Mapping):
+ raise AnsibleError("failed to parse executable inventory script results from {0}: needs to be a json dict\n{1}".format(path, err))
+
+ group = None
+ data_from_meta = None
+
+ # A "_meta" subelement may contain a variable "hostvars" which contains a hash for each host
+ # if this "hostvars" exists at all then do not call --host for each # host.
+ # This is for efficiency and scripts should still return data
+ # if called with --host for backwards compat with 1.2 and earlier.
+ for (group, gdata) in processed.items():
+ if group == '_meta':
+ if 'hostvars' in gdata:
+ data_from_meta = gdata['hostvars']
+ else:
+ self._parse_group(group, gdata)
+
+ for host in self._hosts:
+ got = {}
+ if data_from_meta is None:
+ got = self.get_host_variables(path, host)
+ else:
+ try:
+ got = data_from_meta.get(host, {})
+ except AttributeError as e:
+ raise AnsibleError("Improperly formatted host information for %s: %s" % (host, to_native(e)), orig_exc=e)
+
+ self._populate_host_vars([host], got)
+
+ except Exception as e:
+ raise AnsibleParserError(to_native(e))
+
+ def _parse_group(self, group, data):
+
+ group = self.inventory.add_group(group)
+
+ if not isinstance(data, dict):
+ data = {'hosts': data}
+ # if none of those subkeys are present, it is the simplified syntax: a host with vars
+ elif not any(k in data for k in ('hosts', 'vars', 'children')):
+ data = {'hosts': [group], 'vars': data}
+
+ if 'hosts' in data:
+ if not isinstance(data['hosts'], list):
+ raise AnsibleError("You defined a group '%s' with bad data for the host list:\n %s" % (group, data))
+
+ for hostname in data['hosts']:
+ self._hosts.add(hostname)
+ self.inventory.add_host(hostname, group)
+
+ if 'vars' in data:
+ if not isinstance(data['vars'], dict):
+ raise AnsibleError("You defined a group '%s' with bad data for variables:\n %s" % (group, data))
+
+ for k, v in data['vars'].items():
+ self.inventory.set_variable(group, k, v)
+
+ if group != '_meta' and isinstance(data, dict) and 'children' in data:
+ for child_name in data['children']:
+ child_name = self.inventory.add_group(child_name)
+ self.inventory.add_child(group, child_name)
+
+ def get_host_variables(self, path, host):
+ """ Runs <script> --host <hostname>, to determine additional host variables """
+
+ cmd = [path, "--host", host]
+ try:
+ sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except OSError as e:
+ raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ (out, err) = sp.communicate()
+ if not out.strip(): # out is bytes here, so avoid comparing against a str
+ return {}
+ try:
+ return json_dict_bytes_to_unicode(self.loader.load(out, file_name=path))
+ except ValueError:
+ raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
diff --git a/lib/ansible/plugins/inventory/toml.py b/lib/ansible/plugins/inventory/toml.py
new file mode 100644
index 0000000..f68b34a
--- /dev/null
+++ b/lib/ansible/plugins/inventory/toml.py
@@ -0,0 +1,298 @@
+# Copyright (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: toml
+ version_added: "2.8"
+ short_description: Uses a specific TOML file as an inventory source.
+ description:
+ - TOML based inventory format
+ - File MUST have a valid '.toml' file extension
+ notes:
+ - >
+ Requires one of the following python libraries: 'toml', 'tomli', or 'tomllib'
+'''
+
+EXAMPLES = r'''# fmt: toml
+# Example 1
+[all.vars]
+has_java = false
+
+[web]
+children = [
+ "apache",
+ "nginx"
+]
+vars = { http_port = 8080, myvar = 23 }
+
+[web.hosts]
+host1 = {}
+host2 = { ansible_port = 222 }
+
+[apache.hosts]
+tomcat1 = {}
+tomcat2 = { myvar = 34 }
+tomcat3 = { mysecret = "03#pa33w0rd" }
+
+[nginx.hosts]
+jenkins1 = {}
+
+[nginx.vars]
+has_java = true
+
+# Example 2
+[all.vars]
+has_java = false
+
+[web]
+children = [
+ "apache",
+ "nginx"
+]
+
+[web.vars]
+http_port = 8080
+myvar = 23
+
+[web.hosts.host1]
+[web.hosts.host2]
+ansible_port = 222
+
+[apache.hosts.tomcat1]
+
+[apache.hosts.tomcat2]
+myvar = 34
+
+[apache.hosts.tomcat3]
+mysecret = "03#pa33w0rd"
+
+[nginx.hosts.jenkins1]
+
+[nginx.vars]
+has_java = true
+
+# Example 3
+[ungrouped.hosts]
+host1 = {}
+host2 = { ansible_host = "127.0.0.1", ansible_port = 44 }
+host3 = { ansible_host = "127.0.0.1", ansible_port = 45 }
+
+[g1.hosts]
+host4 = {}
+
+[g2.hosts]
+host4 = {}
+'''
+
+import os
+import typing as t
+
+from collections.abc import MutableMapping, MutableSequence
+from functools import partial
+
+from ansible.errors import AnsibleFileNotFound, AnsibleParserError, AnsibleRuntimeError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import string_types, text_type
+from ansible.parsing.yaml.objects import AnsibleSequence, AnsibleUnicode
+from ansible.plugins.inventory import BaseFileInventoryPlugin
+from ansible.utils.display import Display
+from ansible.utils.unsafe_proxy import AnsibleUnsafeBytes, AnsibleUnsafeText
+
+HAS_TOML = False
+try:
+ import toml
+ HAS_TOML = True
+except ImportError:
+ pass
+
+HAS_TOMLIW = False
+try:
+ import tomli_w # type: ignore[import]
+ HAS_TOMLIW = True
+except ImportError:
+ pass
+
+HAS_TOMLLIB = False
+try:
+ import tomllib # type: ignore[import]
+ HAS_TOMLLIB = True
+except ImportError:
+ try:
+ import tomli as tomllib # type: ignore[no-redef]
+ HAS_TOMLLIB = True
+ except ImportError:
+ pass
+
+display = Display()
+
+
+# dumps
+if HAS_TOML and hasattr(toml, 'TomlEncoder'):
+ # toml>=0.10.0
+ class AnsibleTomlEncoder(toml.TomlEncoder):
+ def __init__(self, *args, **kwargs):
+ super(AnsibleTomlEncoder, self).__init__(*args, **kwargs)
+ # Map our custom YAML object types to dump_funcs from ``toml``
+ self.dump_funcs.update({
+ AnsibleSequence: self.dump_funcs.get(list),
+ AnsibleUnicode: self.dump_funcs.get(str),
+ AnsibleUnsafeBytes: self.dump_funcs.get(str),
+ AnsibleUnsafeText: self.dump_funcs.get(str),
+ })
+ toml_dumps = partial(toml.dumps, encoder=AnsibleTomlEncoder()) # type: t.Callable[[t.Any], str]
+else:
+ # toml<0.10.0
+ # tomli-w
+ def toml_dumps(data): # type: (t.Any) -> str
+ if HAS_TOML:
+ return toml.dumps(convert_yaml_objects_to_native(data))
+ elif HAS_TOMLIW:
+ return tomli_w.dumps(convert_yaml_objects_to_native(data))
+ raise AnsibleRuntimeError(
+ 'The python "toml" or "tomli-w" library is required when using the TOML output format'
+ )
+
+# loads
+if HAS_TOML:
+ # prefer toml if installed, since it supports both encoding and decoding
+ toml_loads = toml.loads # type: ignore[assignment]
+ TOMLDecodeError = toml.TomlDecodeError # type: t.Any
+elif HAS_TOMLLIB:
+ toml_loads = tomllib.loads # type: ignore[assignment]
+ TOMLDecodeError = tomllib.TOMLDecodeError # type: t.Any # type: ignore[no-redef]
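+
+# Editorial note (not part of the module): when several libraries are
+# installed, 'toml' wins for both loads and dumps since it supports encoding
+# as well as decoding; with only stdlib tomllib (Python >= 3.11) or 'tomli',
+# loading works but toml_dumps raises AnsibleRuntimeError unless 'toml' or
+# 'tomli-w' is available.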
+
+
+def convert_yaml_objects_to_native(obj):
+ """Older versions of the ``toml`` python library, and tomllib, don't have
+ a pluggable way to tell the encoder about custom types, so we need to
+ ensure objects that we pass are native types.
+
+ Used with:
+ - ``toml<0.10.0`` where ``toml.TomlEncoder`` is missing
+ - ``tomli`` or ``tomllib``
+
+ This function recurses an object and ensures we cast any of the types from
+ ``ansible.parsing.yaml.objects`` into their native types, effectively cleansing
+ the data before we hand it over to the toml library.
+
+ This function doesn't directly check for the types from ``ansible.parsing.yaml.objects``
+ but instead checks for the types those objects inherit from, to offer more flexibility.
+ """
+ if isinstance(obj, dict):
+ return dict((k, convert_yaml_objects_to_native(v)) for k, v in obj.items())
+ elif isinstance(obj, list):
+ return [convert_yaml_objects_to_native(v) for v in obj]
+ elif isinstance(obj, text_type):
+ return text_type(obj)
+ else:
+ return obj
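+
+# For example (editorial, not part of the module): AnsibleUnicode(u'x'), a str
+# subclass, is re-cast to a plain str, and dicts/lists are rebuilt recursively
+# so no YAML object types remain anywhere in the structure.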
+
+
+class InventoryModule(BaseFileInventoryPlugin):
+ NAME = 'toml'
+
+ def _parse_group(self, group, group_data):
+ if group_data is not None and not isinstance(group_data, MutableMapping):
+ self.display.warning("Skipping '%s' as this is not a valid group definition" % group)
+ return
+
+ group = self.inventory.add_group(group)
+ if group_data is None:
+ return
+
+ for key, data in group_data.items():
+ if key == 'vars':
+ if not isinstance(data, MutableMapping):
+ raise AnsibleParserError(
+ 'Invalid "vars" entry for "%s" group, requires a dict, found "%s" instead.' %
+ (group, type(data))
+ )
+ for var, value in data.items():
+ self.inventory.set_variable(group, var, value)
+
+ elif key == 'children':
+ if not isinstance(data, MutableSequence):
+ raise AnsibleParserError(
+ 'Invalid "children" entry for "%s" group, requires a list, found "%s" instead.' %
+ (group, type(data))
+ )
+ for subgroup in data:
+ self._parse_group(subgroup, {})
+ self.inventory.add_child(group, subgroup)
+
+ elif key == 'hosts':
+ if not isinstance(data, MutableMapping):
+ raise AnsibleParserError(
+ 'Invalid "hosts" entry for "%s" group, requires a dict, found "%s" instead.' %
+ (group, type(data))
+ )
+ for host_pattern, value in data.items():
+ hosts, port = self._expand_hostpattern(host_pattern)
+ self._populate_host_vars(hosts, value, group, port)
+ else:
+ self.display.warning(
+ 'Skipping unexpected key "%s" in group "%s", only "vars", "children" and "hosts" are valid' %
+ (key, group)
+ )
+
+ def _load_file(self, file_name):
+ if not file_name or not isinstance(file_name, string_types):
+ raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_name))
+
+ b_file_name = to_bytes(self.loader.path_dwim(file_name))
+ if not self.loader.path_exists(b_file_name):
+ raise AnsibleFileNotFound("Unable to retrieve file contents", file_name=file_name)
+
+ try:
+ (b_data, private) = self.loader._get_file_contents(file_name)
+ return toml_loads(to_text(b_data, errors='surrogate_or_strict'))
+ except TOMLDecodeError as e:
+ raise AnsibleParserError(
+ 'TOML file (%s) is invalid: %s' % (file_name, to_native(e)),
+ orig_exc=e
+ )
+ except (IOError, OSError) as e:
+ raise AnsibleParserError(
+ "An error occurred while trying to read the file '%s': %s" % (file_name, to_native(e)),
+ orig_exc=e
+ )
+ except Exception as e:
+ raise AnsibleParserError(
+ "An unexpected error occurred while parsing the file '%s': %s" % (file_name, to_native(e)),
+ orig_exc=e
+ )
+
+ def parse(self, inventory, loader, path, cache=True):
+ ''' parses the inventory file '''
+ if not HAS_TOMLLIB and not HAS_TOML:
+ # tomllib works here too, but we don't call it out in the error,
+ # since you either have it or not as part of cpython stdlib >= 3.11
+ raise AnsibleParserError(
+ 'The TOML inventory plugin requires the python "toml", or "tomli" library'
+ )
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self.set_options()
+
+ try:
+ data = self._load_file(path)
+ except Exception as e:
+ raise AnsibleParserError(e)
+
+ if not data:
+ raise AnsibleParserError('Parsed empty TOML file')
+ elif data.get('plugin'):
+ raise AnsibleParserError('Plugin configuration TOML file, not TOML inventory')
+
+ for group_name in data:
+ self._parse_group(group_name, data[group_name])
+
+ def verify_file(self, path):
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+ if ext == '.toml':
+ return True
+ return False
diff --git a/lib/ansible/plugins/inventory/yaml.py b/lib/ansible/plugins/inventory/yaml.py
new file mode 100644
index 0000000..9d5812f
--- /dev/null
+++ b/lib/ansible/plugins/inventory/yaml.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: yaml
+ version_added: "2.4"
+ short_description: Uses a specific YAML file as an inventory source.
+ description:
+ - "YAML-based inventory, should start with the C(all) group and contain hosts/vars/children entries."
+ - Host entries can have sub-entries defined, which will be treated as variables.
+ - Vars entries are normal group vars.
+ - "Children are 'child groups', which can also have their own vars/hosts/children and so on."
+ - File MUST have a valid extension, defined in configuration.
+ notes:
+ - If you want to set vars for the C(all) group inside the inventory file, the C(all) group must be the first entry in the file.
+ - Enabled in configuration by default.
+ options:
+ yaml_extensions:
+ description: list of 'valid' extensions for files containing YAML
+ type: list
+ elements: string
+ default: ['.yaml', '.yml', '.json']
+ env:
+ - name: ANSIBLE_YAML_FILENAME_EXT
+ - name: ANSIBLE_INVENTORY_PLUGIN_EXTS
+ ini:
+ - key: yaml_valid_extensions
+ section: defaults
+ - section: inventory_plugin_yaml
+ key: yaml_valid_extensions
+
+'''
+EXAMPLES = '''
+all: # keys must be unique, i.e. only one 'hosts' per group
+ hosts:
+ test1:
+ test2:
+ host_var: value
+ vars:
+ group_all_var: value
+ children: # key order does not matter, indentation does
+ other_group:
+ children:
+ group_x:
+ hosts:
+ test5 # Note that one machine will work without a colon
+ #group_x:
+ # hosts:
+ # test5 # But this won't
+ # test7 #
+ group_y:
+ hosts:
+ test6: # So always use a colon
+ vars:
+ g2_var2: value3
+ hosts:
+ test4:
+ ansible_host: 127.0.0.1
+ last_group:
+ hosts:
+ test1 # same host as above, additional group membership
+ vars:
+ group_last_var: value
+'''
+
+import os
+
+from collections.abc import MutableMapping
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text
+from ansible.plugins.inventory import BaseFileInventoryPlugin
+
+NoneType = type(None)
+
+
+class InventoryModule(BaseFileInventoryPlugin):
+
+ NAME = 'yaml'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+ if not ext or ext in self.get_option('yaml_extensions'):
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+ ''' parses the inventory file '''
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self.set_options()
+
+ try:
+ data = self.loader.load_from_file(path, cache=False)
+ except Exception as e:
+ raise AnsibleParserError(e)
+
+ if not data:
+ raise AnsibleParserError('Parsed empty YAML file')
+ elif not isinstance(data, MutableMapping):
+ raise AnsibleParserError('YAML inventory has invalid structure, it should be a dictionary, got: %s' % type(data))
+ elif data.get('plugin'):
+ raise AnsibleParserError('Plugin configuration YAML file, not YAML inventory')
+
+ # We expect top level keys to correspond to groups, iterate over them
+ # to get hosts, vars and subgroups (which we iterate over recursively)
+ if isinstance(data, MutableMapping):
+ for group_name in data:
+ self._parse_group(group_name, data[group_name])
+ else:
+ raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(data))
+
+ def _parse_group(self, group, group_data):
+
+ if isinstance(group_data, (MutableMapping, NoneType)): # type: ignore[misc]
+
+ try:
+ group = self.inventory.add_group(group)
+ except AnsibleError as e:
+ raise AnsibleParserError("Unable to add group %s: %s" % (group, to_text(e)))
+
+ if group_data is not None:
+ # make sure they are dicts
+ for section in ['vars', 'children', 'hosts']:
+ if section in group_data:
+ # convert strings to dicts as these are allowed
+ if isinstance(group_data[section], string_types):
+ group_data[section] = {group_data[section]: None}
+
+ if not isinstance(group_data[section], (MutableMapping, NoneType)): # type: ignore[misc]
+ raise AnsibleParserError('Invalid "%s" entry for "%s" group, requires a dictionary, found "%s" instead.' %
+ (section, group, type(group_data[section])))
+
+ for key in group_data:
+
+ if not isinstance(group_data[key], (MutableMapping, NoneType)): # type: ignore[misc]
+ self.display.warning('Skipping key (%s) in group (%s) as it is not a mapping, it is a %s' % (key, group, type(group_data[key])))
+ continue
+
+ if isinstance(group_data[key], NoneType): # type: ignore[misc]
+ self.display.vvv('Skipping empty key (%s) in group (%s)' % (key, group))
+ elif key == 'vars':
+ for var in group_data[key]:
+ self.inventory.set_variable(group, var, group_data[key][var])
+ elif key == 'children':
+ for subgroup in group_data[key]:
+ subgroup = self._parse_group(subgroup, group_data[key][subgroup])
+ self.inventory.add_child(group, subgroup)
+
+ elif key == 'hosts':
+ for host_pattern in group_data[key]:
+ hosts, port = self._parse_host(host_pattern)
+ self._populate_host_vars(hosts, group_data[key][host_pattern] or {}, group, port)
+ else:
+ self.display.warning('Skipping unexpected key (%s) in group (%s), only "vars", "children" and "hosts" are valid' % (key, group))
+
+ else:
+ self.display.warning("Skipping '%s' as this is not a valid group definition" % group)
+
+ return group
+
+ def _parse_host(self, host_pattern):
+ '''
+ Each host key can be a pattern, try to process it and add variables as needed
+ '''
+ try:
+ (hostnames, port) = self._expand_hostpattern(host_pattern)
+ except TypeError:
+ raise AnsibleParserError(
+ f"Host pattern {host_pattern} must be a string. Enclose integers/floats in quotation marks."
+ )
+ return hostnames, port
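+
+ # Editorial examples of the expansion above (not part of the module):
+ # 'web[01:03].example.com' -> (['web01.example.com', 'web02.example.com',
+ # 'web03.example.com'], None)
+ # 'db.example.com:5555' -> (['db.example.com'], 5555)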
diff --git a/lib/ansible/plugins/list.py b/lib/ansible/plugins/list.py
new file mode 100644
index 0000000..e09b293
--- /dev/null
+++ b/lib/ansible/plugins/list.py
@@ -0,0 +1,210 @@
+# (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+
+from ansible import context
+from ansible import constants as C
+from ansible.collections.list import list_collections
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native, to_bytes
+from ansible.plugins import loader
+from ansible.utils.display import Display
+from ansible.utils.collection_loader._collection_finder import _get_collection_path, AnsibleCollectionRef
+
+display = Display()
+
+# not real plugins
+IGNORE = {
+ # ptype: names
+ 'module': ('async_wrapper', ),
+ 'cache': ('base', ),
+}
+
+
+def get_composite_name(collection, name, path, depth):
+ resolved_collection = collection
+ if '.' not in name:
+ resource_name = name
+ else:
+ if collection == 'ansible.legacy' and name.startswith('ansible.builtin.'):
+ resolved_collection = 'ansible.builtin'
+ resource_name = '.'.join(name.split(f"{resolved_collection}.")[1:])
+
+ # collectionize name
+ composite = [resolved_collection]
+ if depth:
+ composite.extend(path.split(os.path.sep)[depth * -1:])
+ composite.append(to_native(resource_name))
+ return '.'.join(composite)
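+
+# Editorial example of the composition above (paths are illustrative):
+#   get_composite_name('community.general', 'json_query',
+#                      '/x/plugins/filter', depth=0)
+#       -> 'community.general.json_query'
+#   with depth=1, the last path component is spliced in:
+#   '/x/plugins/filter/sub' -> 'community.general.sub.json_query'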
+
+
+def _list_plugins_from_paths(ptype, dirs, collection, depth=0):
+
+ plugins = {}
+
+ for path in dirs:
+ display.debug("Searching '{0}'s '{1}' for {2} plugins".format(collection, path, ptype))
+ b_path = to_bytes(path)
+
+ if os.path.basename(b_path).startswith((b'.', b'__')):
+ # skip hidden/special dirs
+ continue
+
+ if os.path.exists(b_path):
+ if os.path.isdir(b_path):
+ bkey = ptype.lower()
+ for plugin_file in os.listdir(b_path):
+
+ if plugin_file.startswith((b'.', b'__')):
+ # hidden or python internal file/dir
+ continue
+
+ display.debug("Found possible plugin: '{0}'".format(plugin_file))
+ b_plugin, b_ext = os.path.splitext(plugin_file)
+ plugin = to_native(b_plugin)
+ full_path = os.path.join(b_path, plugin_file)
+
+ if os.path.isdir(full_path):
+ # its a dir, recurse
+ if collection in C.SYNTHETIC_COLLECTIONS:
+ if not os.path.exists(os.path.join(full_path, b'__init__.py')):
+ # don't recurse for synthetic collections unless __init__.py is present
+ continue
+
+ # actually recurse dirs
+ plugins.update(_list_plugins_from_paths(ptype, [to_native(full_path)], collection, depth=depth + 1))
+ else:
+ if any([
+ plugin in C.IGNORE_FILES, # general files to ignore
+ to_native(b_ext) in C.REJECT_EXTS, # general extensions to ignore
+ b_ext in (b'.yml', b'.yaml', b'.json'), # ignore docs files TODO: constant!
+ plugin in IGNORE.get(bkey, ()), # plugin in reject list
+ os.path.islink(full_path), # skip aliases, author should document in 'aliases' field
+ ]):
+ continue
+
+ if ptype in ('test', 'filter'):
+ try:
+ file_plugins = _list_j2_plugins_from_file(collection, full_path, ptype, plugin)
+ except KeyError as e:
+ display.warning('Skipping file %s: %s' % (full_path, to_native(e)))
+ continue
+
+ for plugin in file_plugins:
+ plugin_name = get_composite_name(collection, plugin.ansible_name, os.path.dirname(to_native(full_path)), depth)
+ plugins[plugin_name] = full_path
+ else:
+ plugin_name = get_composite_name(collection, plugin, os.path.dirname(to_native(full_path)), depth)
+ plugins[plugin_name] = full_path
+ else:
+ display.debug("Skip listing plugins in '{0}' as it is not a directory".format(path))
+ else:
+ display.debug("Skip listing plugins in '{0}' as it does not exist".format(path))
+
+ return plugins
+
+
+def _list_j2_plugins_from_file(collection, plugin_path, ptype, plugin_name):
+
+ ploader = getattr(loader, '{0}_loader'.format(ptype))
+ file_plugins = ploader.get_contained_plugins(collection, plugin_path, plugin_name)
+ return file_plugins
+
+
+def list_collection_plugins(ptype, collections, search_paths=None):
+
+ # starts at {plugin_name: filepath, ...}, but changes at the end
+ plugins = {}
+ try:
+ ploader = getattr(loader, '{0}_loader'.format(ptype))
+ except AttributeError:
+ raise AnsibleError('Cannot list plugins, incorrect plugin type supplied: {0}'.format(ptype))
+
+ # get plugins for each collection
+ for collection in collections.keys():
+ if collection == 'ansible.builtin':
+ # dirs from ansible install, but not configured paths
+ dirs = [d.path for d in ploader._get_paths_with_context() if d.internal]
+ elif collection == 'ansible.legacy':
+ # configured paths + search paths (should include basedirs/-M)
+ dirs = [d.path for d in ploader._get_paths_with_context() if not d.internal]
+ if context.CLIARGS.get('module_path', None):
+ dirs.extend(context.CLIARGS['module_path'])
+ else:
+ # search path in this case is for locating the collection itself
+ b_ptype = to_bytes(C.COLLECTION_PTYPE_COMPAT.get(ptype, ptype))
+ dirs = [to_native(os.path.join(collections[collection], b'plugins', b_ptype))]
+ # acr = AnsibleCollectionRef.try_parse_fqcr(collection, ptype)
+ # if acr:
+ # dirs = acr.subdirs
+ # else:
+
+ # raise Exception('bad acr for %s, %s' % (collection, ptype))
+
+ plugins.update(_list_plugins_from_paths(ptype, dirs, collection))
+
+ # return plugin and its class object, None for those not verifiable or failing
+ if ptype in ('module',):
+ # no 'invalid' tests for modules
+ for plugin in plugins.keys():
+ plugins[plugin] = (plugins[plugin], None)
+ else:
+ # detect invalid plugin candidates AND add loaded object to return data
+ for plugin in list(plugins.keys()):
+ pobj = None
+ try:
+ pobj = ploader.get(plugin, class_only=True)
+ except Exception as e:
+ display.vvv("The '{0}' {1} plugin could not be loaded from '{2}': {3}".format(plugin, ptype, plugins[plugin], to_native(e)))
+
+ # sets final {plugin_name: (filepath, class|NONE if not loaded), ...}
+ plugins[plugin] = (plugins[plugin], pobj)
+
+ # {plugin_name: (filepath, class), ...}
+ return plugins
+
+
+def list_plugins(ptype, collection=None, search_paths=None):
+
+ # {plugin_name: (filepath, class), ...}
+ plugins = {}
+ collections = {}
+ if collection is None:
+ # list all collections, add synthetic ones
+ collections['ansible.builtin'] = b''
+ collections['ansible.legacy'] = b''
+ collections.update(list_collections(search_paths=search_paths, dedupe=True))
+ elif collection == 'ansible.legacy':
+ # add builtin, since legacy also resolves to these
+ collections[collection] = b''
+ collections['ansible.builtin'] = b''
+ else:
+ try:
+ collections[collection] = to_bytes(_get_collection_path(collection))
+ except ValueError as e:
+ raise AnsibleError("Cannot use supplied collection {0}: {1}".format(collection, to_native(e)), orig_exc=e)
+
+ if collections:
+ plugins.update(list_collection_plugins(ptype, collections))
+
+ return plugins
+
+
+# wrappers
+def list_plugin_names(ptype, collection=None):
+ return list(list_plugins(ptype, collection).keys()) # keys are the resolved plugin names
+
+
+def list_plugin_files(ptype, collection=None):
+ plugins = list_plugins(ptype, collection)
+ return [plugins[k][0] for k in plugins.keys()]
+
+
+def list_plugin_classes(ptype, collection=None):
+ plugins = list_plugins(ptype, collection)
+ return [plugins[k][1] for k in plugins.keys()]
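+
+# Hypothetical usage of the wrappers above (plugin names/paths illustrative):
+#   list_plugin_names('cache') -> ['ansible.builtin.jsonfile', ...]
+#   list_plugin_files('cache') -> ['/.../plugins/cache/jsonfile.py', ...]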
diff --git a/lib/ansible/plugins/loader.py b/lib/ansible/plugins/loader.py
new file mode 100644
index 0000000..845fdcd
--- /dev/null
+++ b/lib/ansible/plugins/loader.py
@@ -0,0 +1,1622 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
+# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import glob
+import os
+import os.path
+import pkgutil
+import sys
+import warnings
+
+from collections import defaultdict, namedtuple
+from traceback import format_exc
+
+from ansible import __version__ as ansible_version
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsiblePluginCircularRedirect, AnsiblePluginRemovedError, AnsibleCollectionUnsupportedVersionError
+from ansible.module_utils._text import to_bytes, to_text, to_native
+from ansible.module_utils.compat.importlib import import_module
+from ansible.module_utils.six import string_types
+from ansible.parsing.utils.yaml import from_yaml
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.plugins import get_plugin_class, MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE
+from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
+from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder, _get_collection_metadata
+from ansible.utils.display import Display
+from ansible.utils.plugin_docs import add_fragments, find_plugin_docfile
+
+# TODO: take the packaging dep, or vendor SpecifierSet?
+
+try:
+ from packaging.specifiers import SpecifierSet
+ from packaging.version import Version
+except ImportError:
+ SpecifierSet = None # type: ignore[misc]
+ Version = None # type: ignore[misc]
+
+import importlib.util
+
+display = Display()
+
+get_with_context_result = namedtuple('get_with_context_result', ['object', 'plugin_load_context'])
+
+
+def get_all_plugin_loaders():
+ return [(name, obj) for (name, obj) in globals().items() if isinstance(obj, PluginLoader)]
+
+
+def add_all_plugin_dirs(path):
+ ''' add any existing plugin dirs in the path provided '''
+ b_path = os.path.expanduser(to_bytes(path, errors='surrogate_or_strict'))
+ if os.path.isdir(b_path):
+ for name, obj in get_all_plugin_loaders():
+ if obj.subdir:
+ plugin_path = os.path.join(b_path, to_bytes(obj.subdir))
+ if os.path.isdir(plugin_path):
+ obj.add_directory(to_text(plugin_path))
+ else:
+ display.warning("Ignoring invalid path provided to plugin path: '%s' is not a directory" % to_text(path))
+
+
+def get_shell_plugin(shell_type=None, executable=None):
+
+ if not shell_type:
+ # default to sh
+ shell_type = 'sh'
+
+ # mostly for backwards compat
+ if executable:
+ if isinstance(executable, string_types):
+ shell_filename = os.path.basename(executable)
+ try:
+ shell = shell_loader.get(shell_filename)
+ except Exception:
+ shell = None
+
+ if shell is None:
+ for shell in shell_loader.all():
+ if shell_filename in shell.COMPATIBLE_SHELLS:
+ shell_type = shell.SHELL_FAMILY
+ break
+ else:
+ raise AnsibleError("Either a shell type or a shell executable must be provided ")
+
+ shell = shell_loader.get(shell_type)
+ if not shell:
+ raise AnsibleError("Could not find the shell plugin required (%s)." % shell_type)
+
+ if executable:
+ setattr(shell, 'executable', executable)
+
+ return shell
+
+
+def add_dirs_to_loader(which_loader, paths):
+
+ loader = getattr(sys.modules[__name__], '%s_loader' % which_loader)
+ for path in paths:
+ loader.add_directory(path, with_subdir=True)
+
+
+class PluginPathContext(object):
+ def __init__(self, path, internal):
+ self.path = path
+ self.internal = internal
+
+
+class PluginLoadContext(object):
+ def __init__(self):
+ self.original_name = None
+ self.redirect_list = []
+ self.error_list = []
+ self.import_error_list = []
+ self.load_attempts = []
+ self.pending_redirect = None
+ self.exit_reason = None
+ self.plugin_resolved_path = None
+ self.plugin_resolved_name = None
+ self.plugin_resolved_collection = None # empty string for resolved plugins from user-supplied paths
+ self.deprecated = False
+ self.removal_date = None
+ self.removal_version = None
+ self.deprecation_warnings = []
+ self.resolved = False
+ self._resolved_fqcn = None
+ self.action_plugin = None
+
+ @property
+ def resolved_fqcn(self):
+ if not self.resolved:
+ return
+
+ if not self._resolved_fqcn:
+ final_plugin = self.redirect_list[-1]
+ if AnsibleCollectionRef.is_valid_fqcr(final_plugin) and final_plugin.startswith('ansible.legacy.'):
+ final_plugin = final_plugin.split('ansible.legacy.')[-1]
+ if self.plugin_resolved_collection and not AnsibleCollectionRef.is_valid_fqcr(final_plugin):
+ final_plugin = self.plugin_resolved_collection + '.' + final_plugin
+ self._resolved_fqcn = final_plugin
+
+ return self._resolved_fqcn
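+
+ # Editorial example (not part of the module): a redirect chain ending in
+ # 'ansible.legacy.copy' with plugin_resolved_collection 'ansible.builtin'
+ # yields the fqcn 'ansible.builtin.copy'.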
+
+ def record_deprecation(self, name, deprecation, collection_name):
+ if not deprecation:
+ return self
+
+ # The `or ''` instead of using `.get(..., '')` makes sure that even if the user explicitly
+ # sets `warning_text` to `~` (None) or `false`, we still get an empty string.
+ warning_text = deprecation.get('warning_text', None) or ''
+ removal_date = deprecation.get('removal_date', None)
+ removal_version = deprecation.get('removal_version', None)
+ # If both removal_date and removal_version are specified, use removal_date
+ if removal_date is not None:
+ removal_version = None
+ warning_text = '{0} has been deprecated.{1}{2}'.format(name, ' ' if warning_text else '', warning_text)
+
+ display.deprecated(warning_text, date=removal_date, version=removal_version, collection_name=collection_name)
+
+ self.deprecated = True
+ if removal_date:
+ self.removal_date = removal_date
+ if removal_version:
+ self.removal_version = removal_version
+ self.deprecation_warnings.append(warning_text)
+ return self
+
+ def resolve(self, resolved_name, resolved_path, resolved_collection, exit_reason, action_plugin):
+ self.pending_redirect = None
+ self.plugin_resolved_name = resolved_name
+ self.plugin_resolved_path = resolved_path
+ self.plugin_resolved_collection = resolved_collection
+ self.exit_reason = exit_reason
+ self.resolved = True
+ self.action_plugin = action_plugin
+ return self
+
+ def redirect(self, redirect_name):
+ self.pending_redirect = redirect_name
+ self.exit_reason = 'pending redirect resolution from {0} to {1}'.format(self.original_name, redirect_name)
+ self.resolved = False
+ return self
+
+ def nope(self, exit_reason):
+ self.pending_redirect = None
+ self.exit_reason = exit_reason
+ self.resolved = False
+ return self
+
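+# A PluginLoadContext behaves as a small state machine during lookup; each
+# resolution step ends in one of three states (a rough sketch, values illustrative):
+#
+#     ctx = PluginLoadContext()
+#     ctx.redirect('ns.coll.other')   # routing redirect to chase on the next pass
+#     ctx.nope('no matches found')    # give up: resolved stays False
+#     ctx.resolve('ns.coll.x', '/p/x.py', 'ns.coll', 'found exact match', None)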
+
+class PluginLoader:
+ '''
+ PluginLoader loads plugins from the configured plugin directories.
+
+ It searches for plugins by iterating through the combined list of play basedirs, configured
+ paths, and the python path. The first match is used.
+ '''
+
+ def __init__(self, class_name, package, config, subdir, aliases=None, required_base_class=None):
+ aliases = {} if aliases is None else aliases
+
+ self.class_name = class_name
+ self.base_class = required_base_class
+ self.package = package
+ self.subdir = subdir
+
+ # FIXME: remove alias dict in favor of alias by symlink?
+ self.aliases = aliases
+
+ if config and not isinstance(config, list):
+ config = [config]
+ elif not config:
+ config = []
+
+ self.config = config
+
+ if class_name not in MODULE_CACHE:
+ MODULE_CACHE[class_name] = {}
+ if class_name not in PATH_CACHE:
+ PATH_CACHE[class_name] = None
+ if class_name not in PLUGIN_PATH_CACHE:
+ PLUGIN_PATH_CACHE[class_name] = defaultdict(dict)
+
+ # hold dirs added at runtime outside of config
+ self._extra_dirs = []
+
+ # caches
+ self._module_cache = MODULE_CACHE[class_name]
+ self._paths = PATH_CACHE[class_name]
+ self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
+
+ self._searched_paths = set()
+
+ @property
+ def type(self):
+ return AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(self.subdir)
+
+ def __repr__(self):
+ return 'PluginLoader(type={0})'.format(self.type)
+
+ def _clear_caches(self):
+
+ if C.OLD_PLUGIN_CACHE_CLEARING:
+ self._paths = None
+ else:
+ # reset global caches
+ MODULE_CACHE[self.class_name] = {}
+ PATH_CACHE[self.class_name] = None
+ PLUGIN_PATH_CACHE[self.class_name] = defaultdict(dict)
+
+ # reset internal caches
+ self._module_cache = MODULE_CACHE[self.class_name]
+ self._paths = PATH_CACHE[self.class_name]
+ self._plugin_path_cache = PLUGIN_PATH_CACHE[self.class_name]
+ self._searched_paths = set()
+
+ def __setstate__(self, data):
+ '''
+ Deserializer.
+ '''
+
+ class_name = data.get('class_name')
+ package = data.get('package')
+ config = data.get('config')
+ subdir = data.get('subdir')
+ aliases = data.get('aliases')
+ base_class = data.get('base_class')
+
+ PATH_CACHE[class_name] = data.get('PATH_CACHE')
+ PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE')
+
+ self.__init__(class_name, package, config, subdir, aliases, base_class)
+ self._extra_dirs = data.get('_extra_dirs', [])
+ self._searched_paths = data.get('_searched_paths', set())
+
+ def __getstate__(self):
+ '''
+ Serializer.
+ '''
+
+ return dict(
+ class_name=self.class_name,
+ base_class=self.base_class,
+ package=self.package,
+ config=self.config,
+ subdir=self.subdir,
+ aliases=self.aliases,
+ _extra_dirs=self._extra_dirs,
+ _searched_paths=self._searched_paths,
+ PATH_CACHE=PATH_CACHE[self.class_name],
+ PLUGIN_PATH_CACHE=PLUGIN_PATH_CACHE[self.class_name],
+ )
+
+ def format_paths(self, paths):
+ ''' Returns a string suitable for printing the search path '''
+
+ # Uses a list to get the order right
+ ret = []
+ for i in paths:
+ if i not in ret:
+ ret.append(i)
+ return os.pathsep.join(ret)
+
+ def print_paths(self):
+ return self.format_paths(self._get_paths(subdirs=False))
+
+ def _all_directories(self, dir):
+ results = []
+ results.append(dir)
+ for root, subdirs, files in os.walk(dir, followlinks=True):
+ if '__init__.py' in files:
+ for x in subdirs:
+ results.append(os.path.join(root, x))
+ return results
+
+ def _get_package_paths(self, subdirs=True):
+ ''' Gets the path of a Python package '''
+
+ if not self.package:
+ return []
+ if not hasattr(self, 'package_path'):
+ m = __import__(self.package)
+ parts = self.package.split('.')[1:]
+ for parent_mod in parts:
+ m = getattr(m, parent_mod)
+ self.package_path = to_text(os.path.dirname(m.__file__), errors='surrogate_or_strict')
+ if subdirs:
+ return self._all_directories(self.package_path)
+ return [self.package_path]
+
+ def _get_paths_with_context(self, subdirs=True):
+ ''' Return a list of PluginPathContext objects to search for plugins in '''
+
+ # FIXME: This is potentially buggy if subdirs is sometimes True and sometimes False.
+ # In current usage, everything calls this with subdirs=True except for module_utils_loader and ansible-doc
+ # which always calls it with subdirs=False. So there currently isn't a problem with this caching.
+ if self._paths is not None:
+ return self._paths
+
+ ret = [PluginPathContext(p, False) for p in self._extra_dirs]
+
+ # look in any configured plugin paths, allow one level deep for subcategories
+ if self.config is not None:
+ for path in self.config:
+ path = os.path.abspath(os.path.expanduser(path))
+ if subdirs:
+ contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
+ for c in contents:
+ c = to_text(c, errors='surrogate_or_strict')
+ if os.path.isdir(c) and c not in ret:
+ ret.append(PluginPathContext(c, False))
+
+ path = to_text(path, errors='surrogate_or_strict')
+ if path not in ret:
+ ret.append(PluginPathContext(path, False))
+
+ # look for any plugins installed in the package subtree
+ # Note package path always gets added last so that every other type of
+ # path is searched before it.
+ ret.extend([PluginPathContext(p, True) for p in self._get_package_paths(subdirs=subdirs)])
+
+ # HACK: because powershell modules are in the same directory
+ # hierarchy as other modules, we have to process them last. This is
+ # because powershell only works on windows but the other modules work
+ # anywhere (possibly including windows if the correct language
+ # interpreter is installed). The non-powershell modules can have any
+ # file extension, so powershell modules would otherwise be picked up
+ # by that search. The non-hack way to fix this is to have powershell
+ # modules be a different PluginLoader/ModuleLoader, but that requires
+ # changing other things too (known things to change would be PATH_CACHE,
+ # PLUGIN_PATH_CACHE, and MODULE_CACHE). Since those three dicts key
+ # on the class_name, and neither regular modules nor powershell modules
+ # have class_names, they would not work as written.
+ #
+ # The expected sort order is paths in the order in 'ret' with paths ending in '/windows' at the end,
+ # also in the original order they were found in 'ret'.
+ # The .sort() method is guaranteed to be stable, so original order is preserved.
+ ret.sort(key=lambda p: p.path.endswith('/windows'))
+
+ # cache and return the result
+ self._paths = ret
+ return ret
+
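+ # The windows-last ordering relies on False sorting before True, e.g. (sketch):
+ #
+ #     paths = ['/a/modules', '/b/modules/windows', '/c/modules']
+ #     sorted(paths, key=lambda p: p.endswith('/windows'))
+ #     # -> ['/a/modules', '/c/modules', '/b/modules/windows']
+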
+ def _get_paths(self, subdirs=True):
+ ''' Return a list of paths to search for plugins in '''
+
+ paths_with_context = self._get_paths_with_context(subdirs=subdirs)
+ return [path_with_context.path for path_with_context in paths_with_context]
+
+ def _load_config_defs(self, name, module, path):
+ ''' Reads plugin docs to find configuration setting definitions, to push to config manager for later use '''
+
+ # plugins w/o class name don't support config
+ if self.class_name:
+ type_name = get_plugin_class(self.class_name)
+
+ # if type name != 'module_doc_fragment':
+ if type_name in C.CONFIGURABLE_PLUGINS and not C.config.has_configuration_definition(type_name, name):
+ dstring = AnsibleLoader(getattr(module, 'DOCUMENTATION', ''), file_name=path).get_single_data()
+
+ # TODO: allow configurable plugins to use sidecar
+ # if not dstring:
+ # filename, cn = find_plugin_docfile( name, type_name, self, [os.path.dirname(path)], C.YAML_DOC_EXTENSIONS)
+ # # TODO: dstring = AnsibleLoader(, file_name=path).get_single_data()
+
+ if dstring:
+ add_fragments(dstring, path, fragment_loader=fragment_loader, is_module=(type_name == 'module'))
+
+ if 'options' in dstring and isinstance(dstring['options'], dict):
+ C.config.initialize_plugin_configuration_definitions(type_name, name, dstring['options'])
+ display.debug('Loaded config def from plugin (%s/%s)' % (type_name, name))
+
+ def add_directory(self, directory, with_subdir=False):
+ ''' Adds an additional directory to the search path '''
+
+ directory = os.path.realpath(directory)
+
+ if directory is not None:
+ if with_subdir:
+ directory = os.path.join(directory, self.subdir)
+ if directory not in self._extra_dirs:
+ # append the directory and invalidate the path cache
+ self._extra_dirs.append(directory)
+ self._clear_caches()
+ display.debug('Added %s to loader search path' % (directory))
+
+ def _query_collection_routing_meta(self, acr, plugin_type, extension=None):
+ collection_pkg = import_module(acr.n_python_collection_package_name)
+ if not collection_pkg:
+ return None
+
+ # FIXME: shouldn't need this...
+ try:
+ # force any type-specific metadata postprocessing to occur
+ import_module(acr.n_python_collection_package_name + '.plugins.{0}'.format(plugin_type))
+ except ImportError:
+ pass
+
+ # this will be created by the collection PEP302 loader
+ collection_meta = getattr(collection_pkg, '_collection_meta', None)
+
+ if not collection_meta:
+ return None
+
+ # TODO: add subdirs support
+ # check for extension-specific entry first (eg 'setup.ps1')
+ # TODO: str/bytes on extension/name munging
+ if acr.subdirs:
+ subdir_qualified_resource = '.'.join([acr.subdirs, acr.resource])
+ else:
+ subdir_qualified_resource = acr.resource
+ entry = collection_meta.get('plugin_routing', {}).get(plugin_type, {}).get(subdir_qualified_resource + extension, None)
+ if not entry:
+ # try for extension-agnostic entry
+ entry = collection_meta.get('plugin_routing', {}).get(plugin_type, {}).get(subdir_qualified_resource, None)
+ return entry
+
+ def _find_fq_plugin(self, fq_name, extension, plugin_load_context, ignore_deprecated=False):
+ """Search builtin paths to find a plugin. No external paths are searched,
+ meaning plugins inside roles inside collections will be ignored.
+ """
+
+ plugin_load_context.resolved = False
+
+ plugin_type = AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(self.subdir)
+
+ acr = AnsibleCollectionRef.from_fqcr(fq_name, plugin_type)
+
+ # check collection metadata to see if any special handling is required for this plugin
+ routing_metadata = self._query_collection_routing_meta(acr, plugin_type, extension=extension)
+
+ action_plugin = None
+ # TODO: factor this into a wrapper method
+ if routing_metadata:
+ deprecation = routing_metadata.get('deprecation', None)
+
+ # this will no-op if there's no deprecation metadata for this plugin
+ if not ignore_deprecated:
+ plugin_load_context.record_deprecation(fq_name, deprecation, acr.collection)
+
+ tombstone = routing_metadata.get('tombstone', None)
+
+ # FIXME: clean up text gen
+ if tombstone:
+ removal_date = tombstone.get('removal_date')
+ removal_version = tombstone.get('removal_version')
+ warning_text = tombstone.get('warning_text') or ''
+ warning_text = '{0} has been removed.{1}{2}'.format(fq_name, ' ' if warning_text else '', warning_text)
+ removed_msg = display.get_deprecation_message(msg=warning_text, version=removal_version,
+ date=removal_date, removed=True,
+ collection_name=acr.collection)
+ plugin_load_context.removal_date = removal_date
+ plugin_load_context.removal_version = removal_version
+ plugin_load_context.resolved = True
+ plugin_load_context.exit_reason = removed_msg
+ raise AnsiblePluginRemovedError(removed_msg, plugin_load_context=plugin_load_context)
+
+ redirect = routing_metadata.get('redirect', None)
+
+ if redirect:
+ # Prevent mystery redirects that would be determined by the collections keyword
+ if not AnsibleCollectionRef.is_valid_fqcr(redirect):
+ raise AnsibleError(
+ f"Collection {acr.collection} contains invalid redirect for {fq_name}: {redirect}. "
+ "Redirects must use fully qualified collection names."
+ )
+
+ # FIXME: remove once this is covered in debug or whatever
+ display.vv("redirecting (type: {0}) {1} to {2}".format(plugin_type, fq_name, redirect))
+
+ # The name doing the redirection is added at the beginning of _resolve_plugin_step,
+ # but if the unqualified name is used in conjunction with the collections keyword, only
+ # the unqualified name is in the redirect list.
+ if fq_name not in plugin_load_context.redirect_list:
+ plugin_load_context.redirect_list.append(fq_name)
+ return plugin_load_context.redirect(redirect)
+ # TODO: non-FQCN case, do we support `.` prefix for current collection, assume it with no dots, require it for subdirs in current, or ?
+
+ if self.type == 'modules':
+ action_plugin = routing_metadata.get('action_plugin')
+
+ n_resource = to_native(acr.resource, errors='strict')
+ # we want this before the extension is added
+ full_name = '{0}.{1}'.format(acr.n_python_package_name, n_resource)
+
+ if extension:
+ n_resource += extension
+
+ pkg = sys.modules.get(acr.n_python_package_name)
+ if not pkg:
+ # FIXME: there must be cheaper/safer way to do this
+ try:
+ pkg = import_module(acr.n_python_package_name)
+ except ImportError:
+ return plugin_load_context.nope('Python package {0} not found'.format(acr.n_python_package_name))
+
+ pkg_path = os.path.dirname(pkg.__file__)
+
+ n_resource_path = os.path.join(pkg_path, n_resource)
+
+ # FIXME: and is file or file link or ...
+ if os.path.exists(n_resource_path):
+ return plugin_load_context.resolve(
+ full_name, to_text(n_resource_path), acr.collection, 'found exact match for {0} in {1}'.format(full_name, acr.collection), action_plugin)
+
+ if extension:
+ # the request was extension-specific, don't try for an extensionless match
+ return plugin_load_context.nope('no match for {0} in {1}'.format(to_text(n_resource), acr.collection))
+
+ # look for any matching extension in the package location (sans filter)
+ found_files = [f
+ for f in glob.iglob(os.path.join(pkg_path, n_resource) + '.*')
+ if os.path.isfile(f) and not f.endswith(C.MODULE_IGNORE_EXTS)]
+
+ if not found_files:
+ return plugin_load_context.nope('failed fuzzy extension match for {0} in {1}'.format(full_name, acr.collection))
+
+ found_files = sorted(found_files) # sort to ensure deterministic results, with the shortest match first
+
+ if len(found_files) > 1:
+ display.debug('Found several possible candidates for the plugin but using first: %s' % ','.join(found_files))
+
+ return plugin_load_context.resolve(
+ full_name, to_text(found_files[0]), acr.collection,
+ 'found fuzzy extension match for {0} in {1}'.format(full_name, acr.collection), action_plugin)
+
+ def find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
+ ''' Find a plugin named name '''
+ result = self.find_plugin_with_context(name, mod_type, ignore_deprecated, check_aliases, collection_list)
+ if result.resolved and result.plugin_resolved_path:
+ return result.plugin_resolved_path
+
+ return None
+
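+ # Typical lookups (sketch; assumes the builtin plugins are present):
+ #
+ #     module_loader.find_plugin('ping')       # -> path to the builtin ping module
+ #     module_loader.find_plugin('no_such')    # -> None
+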
+ def find_plugin_with_context(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
+ ''' Find a plugin named name, returning contextual info about the load, recursively resolving redirection '''
+ plugin_load_context = PluginLoadContext()
+ plugin_load_context.original_name = name
+ while True:
+ result = self._resolve_plugin_step(name, mod_type, ignore_deprecated, check_aliases, collection_list, plugin_load_context=plugin_load_context)
+ if result.pending_redirect:
+ if result.pending_redirect in result.redirect_list:
+ raise AnsiblePluginCircularRedirect('plugin redirect loop resolving {0} (path: {1})'.format(result.original_name, result.redirect_list))
+ name = result.pending_redirect
+ result.pending_redirect = None
+ plugin_load_context = result
+ else:
+ break
+
+ # TODO: smuggle these to the controller when we're in a worker, reduce noise from normal things like missing plugin packages during collection search
+ if plugin_load_context.error_list:
+ display.warning("errors were encountered during the plugin load for {0}:\n{1}".format(name, plugin_load_context.error_list))
+
+ # TODO: display/return import_error_list? Only useful for forensics...
+
+ # FIXME: store structured deprecation data in PluginLoadContext and use display.deprecate
+ # if plugin_load_context.deprecated and C.config.get_config_value('DEPRECATION_WARNINGS'):
+ # for dw in plugin_load_context.deprecation_warnings:
+ # # TODO: need to smuggle these to the controller if we're in a worker context
+ # display.warning('[DEPRECATION WARNING] ' + dw)
+
+ return plugin_load_context
+
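+ # Inspecting the richer result (sketch):
+ #
+ #     ctx = module_loader.find_plugin_with_context('ping')
+ #     if ctx.resolved:
+ #         ctx.resolved_fqcn, ctx.plugin_resolved_path, ctx.redirect_list
+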
+ # FIXME: name bikeshed
+ def _resolve_plugin_step(self, name, mod_type='', ignore_deprecated=False,
+ check_aliases=False, collection_list=None, plugin_load_context=None):
+ if not plugin_load_context:
+ raise ValueError('A PluginLoadContext is required')
+
+ plugin_load_context.redirect_list.append(name)
+ plugin_load_context.resolved = False
+
+ if name in _PLUGIN_FILTERS[self.package]:
+ plugin_load_context.exit_reason = '{0} matched a defined plugin filter'.format(name)
+ return plugin_load_context
+
+ if mod_type:
+ suffix = mod_type
+ elif self.class_name:
+ # Ansible plugins that run in the controller process (most plugins)
+ suffix = '.py'
+ else:
+ # Only Ansible Modules. Ansible modules can be any executable so
+ # they can have any suffix
+ suffix = ''
+
+ # FIXME: need this right now so we can still load shipped PS module_utils- come up with a more robust solution
+ if (AnsibleCollectionRef.is_valid_fqcr(name) or collection_list) and not name.startswith('Ansible'):
+ if '.' in name or not collection_list:
+ candidates = [name]
+ else:
+ candidates = ['{0}.{1}'.format(c, name) for c in collection_list]
+
+ for candidate_name in candidates:
+ try:
+ plugin_load_context.load_attempts.append(candidate_name)
+ # HACK: refactor this properly
+ if candidate_name.startswith('ansible.legacy'):
+ # 'ansible.legacy' refers to the plugin finding behavior used before collections existed.
+ # They need to search 'library' and the various '*_plugins' directories in order to find the file.
+ plugin_load_context = self._find_plugin_legacy(name.removeprefix('ansible.legacy.'),
+ plugin_load_context, ignore_deprecated, check_aliases, suffix)
+ else:
+ # 'ansible.builtin' should be handled here. This means only internal, or builtin, paths are searched.
+ plugin_load_context = self._find_fq_plugin(candidate_name, suffix, plugin_load_context=plugin_load_context,
+ ignore_deprecated=ignore_deprecated)
+
+ # Pending redirects are added to the redirect_list at the beginning of _resolve_plugin_step.
+ # Once redirects are resolved, ensure the final FQCN is added here.
+ # e.g. 'ns.coll.module' is included rather than only 'module' if a collections list is provided:
+ # - module:
+ # collections: ['ns.coll']
+ if plugin_load_context.resolved and candidate_name not in plugin_load_context.redirect_list:
+ plugin_load_context.redirect_list.append(candidate_name)
+
+ if plugin_load_context.resolved or plugin_load_context.pending_redirect: # if we got an answer or need to chase down a redirect, return
+ return plugin_load_context
+ except (AnsiblePluginRemovedError, AnsiblePluginCircularRedirect, AnsibleCollectionUnsupportedVersionError):
+ # these are generally fatal, let them fly
+ raise
+ except ImportError as ie:
+ plugin_load_context.import_error_list.append(ie)
+ except Exception as ex:
+ # FIXME: keep actual errors, not just assembled messages
+ plugin_load_context.error_list.append(to_native(ex))
+
+ if plugin_load_context.error_list:
+ display.debug(msg='plugin lookup for {0} failed; errors: {1}'.format(name, '; '.join(plugin_load_context.error_list)))
+
+ plugin_load_context.exit_reason = 'no matches found for {0}'.format(name)
+
+ return plugin_load_context
+
+ # if we got here, there's no collection list and it's not an FQ name, so do legacy lookup
+
+ return self._find_plugin_legacy(name, plugin_load_context, ignore_deprecated, check_aliases, suffix)
+
+ def _find_plugin_legacy(self, name, plugin_load_context, ignore_deprecated=False, check_aliases=False, suffix=None):
+ """Search library and various *_plugins paths in order to find the file.
+ This was behavior prior to the existence of collections.
+ """
+ plugin_load_context.resolved = False
+
+ if check_aliases:
+ name = self.aliases.get(name, name)
+
+ # The particular cache to look for modules within. This matches the
+ # requested mod_type
+ pull_cache = self._plugin_path_cache[suffix]
+ try:
+ path_with_context = pull_cache[name]
+ plugin_load_context.plugin_resolved_path = path_with_context.path
+ plugin_load_context.plugin_resolved_name = name
+ plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
+ plugin_load_context._resolved_fqcn = ('ansible.builtin.' + name if path_with_context.internal else name)
+ plugin_load_context.resolved = True
+ return plugin_load_context
+ except KeyError:
+ # Cache miss. Now let's find the plugin
+ pass
+
+ # TODO: Instead of using the self._paths cache (PATH_CACHE) and
+ # self._searched_paths we could use an iterator. Before enabling that
+ # we need to make sure we don't want to add additional directories
+ # (add_directory()) once we start using the iterator.
+ # We can use _get_paths_with_context() since add_directory() forces a cache refresh.
+ for path_with_context in (p for p in self._get_paths_with_context() if p.path not in self._searched_paths and os.path.isdir(to_bytes(p.path))):
+ path = path_with_context.path
+ b_path = to_bytes(path)
+ display.debug('trying %s' % path)
+ plugin_load_context.load_attempts.append(path)
+ internal = path_with_context.internal
+ try:
+ full_paths = (os.path.join(b_path, f) for f in os.listdir(b_path))
+ except OSError as e:
+ display.warning("Error accessing plugin paths: %s" % to_text(e))
+
+ for full_path in (to_native(f) for f in full_paths if os.path.isfile(f) and not f.endswith(b'__init__.py')):
+ full_name = os.path.basename(full_path)
+
+ # HACK: We have no way of executing python byte compiled files as ansible modules so specifically exclude them
+ # FIXME: I believe this is only correct for modules and module_utils.
+ # For all other plugins, .pyc and .pyo should be valid
+ if any(full_path.endswith(x) for x in C.MODULE_IGNORE_EXTS):
+ continue
+ # os.path.splitext always returns a 2-tuple; the extension is '' when absent
+ base_name, extension = os.path.splitext(full_name)
+
+ # everything downstream expects unicode
+ full_path = to_text(full_path, errors='surrogate_or_strict')
+ # Module found, now enter it into the caches that match this file
+ if base_name not in self._plugin_path_cache['']:
+ self._plugin_path_cache[''][base_name] = PluginPathContext(full_path, internal)
+
+ if full_name not in self._plugin_path_cache['']:
+ self._plugin_path_cache[''][full_name] = PluginPathContext(full_path, internal)
+
+ if base_name not in self._plugin_path_cache[extension]:
+ self._plugin_path_cache[extension][base_name] = PluginPathContext(full_path, internal)
+
+ if full_name not in self._plugin_path_cache[extension]:
+ self._plugin_path_cache[extension][full_name] = PluginPathContext(full_path, internal)
+
+ self._searched_paths.add(path)
+ try:
+ path_with_context = pull_cache[name]
+ plugin_load_context.plugin_resolved_path = path_with_context.path
+ plugin_load_context.plugin_resolved_name = name
+ plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
+ plugin_load_context._resolved_fqcn = 'ansible.builtin.' + name if path_with_context.internal else name
+ plugin_load_context.resolved = True
+ return plugin_load_context
+ except KeyError:
+ # Didn't find the plugin in this directory. Load modules from the next one
+ pass
+
+ # if nothing is found, try finding alias/deprecated
+ if not name.startswith('_'):
+ alias_name = '_' + name
+ # We've already cached all the paths at this point
+ if alias_name in pull_cache:
+ path_with_context = pull_cache[alias_name]
+ if not ignore_deprecated and not os.path.islink(path_with_context.path):
+ # FIXME: this is not always the case, some are just aliases
+ display.deprecated('%s is kept for backwards compatibility but usage is discouraged. ' # pylint: disable=ansible-deprecated-no-version
+ 'The module documentation details page may explain more about this rationale.' % name.lstrip('_'))
+ plugin_load_context.plugin_resolved_path = path_with_context.path
+ plugin_load_context.plugin_resolved_name = alias_name
+ plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
+ plugin_load_context._resolved_fqcn = 'ansible.builtin.' + alias_name if path_with_context.internal else alias_name
+ plugin_load_context.resolved = True
+ return plugin_load_context
+
+ # last ditch, if it's something that can be redirected, look for a builtin redirect before giving up
+ candidate_fqcr = 'ansible.builtin.{0}'.format(name)
+ if '.' not in name and AnsibleCollectionRef.is_valid_fqcr(candidate_fqcr):
+ return self._find_fq_plugin(fq_name=candidate_fqcr, extension=suffix, plugin_load_context=plugin_load_context, ignore_deprecated=ignore_deprecated)
+
+ return plugin_load_context.nope('{0} is not eligible for last-chance resolution'.format(name))
+
+ def has_plugin(self, name, collection_list=None):
+ ''' Checks if a plugin named name exists '''
+
+ try:
+ return self.find_plugin(name, collection_list=collection_list) is not None
+ except Exception as ex:
+ if isinstance(ex, AnsibleError):
+ raise
+ # log and continue, likely an innocuous type/package loading failure in collections import
+ display.debug('has_plugin error: {0}'.format(to_text(ex)))
+
+ __contains__ = has_plugin
+
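+ # the alias above makes membership tests read naturally (sketch):
+ #
+ #     if 'ping' in module_loader:
+ #         ...
+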
+ def _load_module_source(self, name, path):
+
+ # avoid collisions across plugins
+ if name.startswith('ansible_collections.'):
+ full_name = name
+ else:
+ full_name = '.'.join([self.package, name])
+
+ if full_name in sys.modules:
+ # Avoids double loading, See https://github.com/ansible/ansible/issues/13110
+ return sys.modules[full_name]
+
+ with warnings.catch_warnings():
+ # FIXME: this still has issues if the module was previously imported but not "cached",
+ # we should bypass this entire codepath for things that are directly importable
+ warnings.simplefilter("ignore", RuntimeWarning)
+ spec = importlib.util.spec_from_file_location(to_native(full_name), to_native(path))
+ module = importlib.util.module_from_spec(spec)
+
+ # mimic import machinery; make the module-being-loaded available in sys.modules during import
+ # and remove if there's a failure...
+ sys.modules[full_name] = module
+
+ try:
+ spec.loader.exec_module(module)
+ except Exception:
+ del sys.modules[full_name]
+ raise
+
+ return module
+
+ def _update_object(self, obj, name, path, redirected_names=None, resolved=None):
+
+ # set extra info on the module, in case we want it later
+ setattr(obj, '_original_path', path)
+ setattr(obj, '_load_name', name)
+ setattr(obj, '_redirected_names', redirected_names or [])
+
+ names = []
+ if resolved:
+ names.append(resolved)
+ if redirected_names:
+ # reverse list so best name comes first
+ names.extend(redirected_names[::-1])
+ if not names:
+ raise AnsibleError(f"Missing FQCN for plugin source {name}")
+
+ setattr(obj, 'ansible_aliases', names)
+ setattr(obj, 'ansible_name', names[0])
+
+ def get(self, name, *args, **kwargs):
+ return self.get_with_context(name, *args, **kwargs).object
+
+ def get_with_context(self, name, *args, **kwargs):
+ ''' instantiates a plugin of the given name using arguments '''
+
+ found_in_cache = True
+ class_only = kwargs.pop('class_only', False)
+ collection_list = kwargs.pop('collection_list', None)
+ if name in self.aliases:
+ name = self.aliases[name]
+ plugin_load_context = self.find_plugin_with_context(name, collection_list=collection_list)
+ if not plugin_load_context.resolved or not plugin_load_context.plugin_resolved_path:
+ # FIXME: this is probably an error (eg removed plugin)
+ return get_with_context_result(None, plugin_load_context)
+
+ fq_name = plugin_load_context.resolved_fqcn
+ if '.' not in fq_name:
+ fq_name = '.'.join((plugin_load_context.plugin_resolved_collection, fq_name))
+ name = plugin_load_context.plugin_resolved_name
+ path = plugin_load_context.plugin_resolved_path
+ redirected_names = plugin_load_context.redirect_list or []
+
+ if path not in self._module_cache:
+ self._module_cache[path] = self._load_module_source(name, path)
+ found_in_cache = False
+
+ self._load_config_defs(name, self._module_cache[path], path)
+
+ obj = getattr(self._module_cache[path], self.class_name)
+
+ if self.base_class:
+ # The import path is hardcoded and should be the right place,
+ # so we are not expecting an ImportError.
+ module = __import__(self.package, fromlist=[self.base_class])
+ # Check whether this obj has the required base class.
+ try:
+ plugin_class = getattr(module, self.base_class)
+ except AttributeError:
+ return get_with_context_result(None, plugin_load_context)
+ if not issubclass(obj, plugin_class):
+ return get_with_context_result(None, plugin_load_context)
+
+ # FIXME: update this to use the load context
+ self._display_plugin_load(self.class_name, name, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
+
+ if not class_only:
+ try:
+ # A plugin may need to use its _load_name in __init__ (for example, to set
+ # or get options from config), so update the object before using the constructor
+ instance = object.__new__(obj)
+ self._update_object(instance, name, path, redirected_names, fq_name)
+ obj.__init__(instance, *args, **kwargs) # pylint: disable=unnecessary-dunder-call
+ obj = instance
+ except TypeError as e:
+ if "abstract" in e.args[0]:
+ # Abstract Base Class or incomplete plugin, don't load
+ display.v('Returning not found on "%s" as it has unimplemented abstract methods; %s' % (name, to_native(e)))
+ return get_with_context_result(None, plugin_load_context)
+ raise
+
+ self._update_object(obj, name, path, redirected_names, fq_name)
+ return get_with_context_result(obj, plugin_load_context)
+
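+ # Typical usage (sketch; extra positional/keyword args are forwarded to the
+ # plugin constructor, so they vary by plugin type):
+ #
+ #     cache = cache_loader.get('memory')            # instance, or None if unresolved
+ #     res = cache_loader.get_with_context('memory')
+ #     res.object, res.plugin_load_context
+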
+ def _display_plugin_load(self, class_name, name, searched_paths, path, found_in_cache=None, class_only=None):
+ ''' formats data to display debug info for plugin loading, also avoids processing unless really needed '''
+ if C.DEFAULT_DEBUG:
+ msg = 'Loading %s \'%s\' from %s' % (class_name, os.path.basename(name), path)
+
+ if len(searched_paths) > 1:
+ msg = '%s (searched paths: %s)' % (msg, self.format_paths(searched_paths))
+
+ if found_in_cache or class_only:
+ msg = '%s (found_in_cache=%s, class_only=%s)' % (msg, found_in_cache, class_only)
+
+ display.debug(msg)
+
+ def all(self, *args, **kwargs):
+ '''
+ Iterate through all plugins of this type, in configured paths (no collections)
+
+ A plugin loader is initialized with a specific type. This function is an iterator returning
+ all of the plugins of that type to the caller.
+
+ :kwarg path_only: If this is set to True, then we return the paths to where the plugins reside
+ instead of an instance of the plugin. This conflicts with class_only and both should
+ not be set.
+ :kwarg class_only: If this is set to True then we return the python class which implements
+ a plugin rather than an instance of the plugin. This conflicts with path_only and both
+ should not be set.
+ :kwarg _dedupe: By default, we only return one plugin per plugin name. Deduplication happens
+ in the same way as the :meth:`get` and :meth:`find_plugin` methods resolve which plugin
+ should take precedence. If this is set to False, then we return all of the plugins
+ found, including those with duplicate names. In the case of duplicates, the order in
+ which they are returned is the one that would take precedence first, followed by the
+ others in decreasing precedence order. This should only be used by subclasses which
+ want to manage their own deduplication of the plugins.
+ :*args: Any extra arguments are passed to each plugin when it is instantiated.
+ :**kwargs: Any extra keyword arguments are passed to each plugin when it is instantiated.
+ '''
+ # TODO: Change the signature of this method to:
+ # def all(return_type='instance', args=None, kwargs=None):
+ # if args is None: args = []
+ # if kwargs is None: kwargs = {}
+ # return_type can be instance, class, or path.
+ # These changes will mean that plugin parameters won't conflict with our params and
+ # will also make it impossible to request both a path and a class at the same time.
+ #
+ # Move _dedupe to be a class attribute, CUSTOM_DEDUPE, with subclasses for filters and
+ # tests setting it to True
+
+ dedupe = kwargs.pop('_dedupe', True)
+ path_only = kwargs.pop('path_only', False)
+ class_only = kwargs.pop('class_only', False)
+ # Having both path_only and class_only is a coding bug
+ if path_only and class_only:
+ raise AnsibleError('Do not set both path_only and class_only when calling PluginLoader.all()')
+
+ all_matches = []
+ found_in_cache = True
+
+ legacy_excluding_builtin = set()
+ for path_with_context in self._get_paths_with_context():
+ matches = glob.glob(to_native(os.path.join(path_with_context.path, "*.py")))
+ if not path_with_context.internal:
+ legacy_excluding_builtin.update(matches)
+ # we sort within each path, but keep path precedence from config
+ all_matches.extend(sorted(matches, key=os.path.basename))
+
+ loaded_modules = set()
+ for path in all_matches:
+ name = os.path.splitext(path)[0]
+ basename = os.path.basename(name)
+
+ if basename in _PLUGIN_FILTERS[self.package]:
+ display.debug("'%s' skipped due to a defined plugin filter" % basename)
+ continue
+
+ if basename == '__init__' or (basename == 'base' and self.package == 'ansible.plugins.cache'):
+ # cache has legacy 'base.py' file, which is wrapper for __init__.py
+ display.debug("'%s' skipped due to reserved name" % basename)
+ continue
+
+ if dedupe and basename in loaded_modules:
+ display.debug("'%s' skipped as duplicate" % basename)
+ continue
+
+ loaded_modules.add(basename)
+
+ if path_only:
+ yield path
+ continue
+
+ if path not in self._module_cache:
+ if self.type in ('filter', 'test'):
+ # filter and test plugin files can contain multiple plugins
+ # they must have a unique python module name to prevent them from shadowing each other
+ full_name = '{0}_{1}'.format(abs(hash(path)), basename)
+ else:
+ full_name = basename
+
+ try:
+ module = self._load_module_source(full_name, path)
+ except Exception as e:
+ display.warning("Skipping plugin (%s), cannot load: %s" % (path, to_text(e)))
+ continue
+
+ self._module_cache[path] = module
+ found_in_cache = False
+ else:
+ module = self._module_cache[path]
+
+ self._load_config_defs(basename, module, path)
+
+ try:
+ obj = getattr(module, self.class_name)
+ except AttributeError as e:
+ display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
+ continue
+
+ if self.base_class:
+ # The import path is hardcoded and should be the right place,
+ # so we are not expecting an ImportError.
+ module = __import__(self.package, fromlist=[self.base_class])
+ # Check whether this obj has the required base class.
+ try:
+ plugin_class = getattr(module, self.base_class)
+ except AttributeError:
+ continue
+ if not issubclass(obj, plugin_class):
+ continue
+
+ self._display_plugin_load(self.class_name, basename, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
+
+ if not class_only:
+ try:
+ obj = obj(*args, **kwargs)
+ except TypeError as e:
+ display.warning("Skipping plugin (%s) as it seems to be incomplete: %s" % (path, to_text(e)))
+
+ if path in legacy_excluding_builtin:
+ fqcn = basename
+ else:
+ fqcn = f"ansible.builtin.{basename}"
+ self._update_object(obj, basename, path, resolved=fqcn)
+ yield obj
+
+
+class Jinja2Loader(PluginLoader):
+ """
+ PluginLoader optimized for Jinja2 plugins
+
+ The filter and test plugins are Jinja2 plugins encapsulated inside of our plugin format.
+ We need to do a few things differently in the base class because of file == plugin
+ assumptions and dedupe logic.
+ """
+ def __init__(self, class_name, package, config, subdir, aliases=None, required_base_class=None):
+
+ super(Jinja2Loader, self).__init__(class_name, package, config, subdir, aliases=aliases, required_base_class=required_base_class)
+ self._loaded_j2_file_maps = []
+
+ def _clear_caches(self):
+ super(Jinja2Loader, self)._clear_caches()
+ self._loaded_j2_file_maps = []
+
+ def find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
+
+ # TODO: handle collection plugin find, see 'get_with_context'
+ # this can really 'find plugin file'
+ plugin = super(Jinja2Loader, self).find_plugin(name, mod_type=mod_type, ignore_deprecated=ignore_deprecated, check_aliases=check_aliases,
+ collection_list=collection_list)
+
+ # if not found, try loading all non-collection plugins and see if it is in there
+ if not plugin:
+ # all() yields instantiated plugins, so scan them for a matching name
+ for candidate in self.all():
+ if name in (candidate._load_name, candidate.ansible_name):
+ plugin = candidate
+ break
+
+ return plugin
+
+ @property
+ def method_map_name(self):
+ return get_plugin_class(self.class_name) + 's'
+
+ def get_contained_plugins(self, collection, plugin_path, name):
+
+ plugins = []
+
+ full_name = '.'.join(['ansible_collections', collection, 'plugins', self.type, name])
+ try:
+ # use 'parent' loader class to find files, but cannot return this as it can contain multiple plugins per file
+ if plugin_path not in self._module_cache:
+ self._module_cache[plugin_path] = self._load_module_source(full_name, plugin_path)
+ module = self._module_cache[plugin_path]
+ obj = getattr(module, self.class_name)
+ except Exception as e:
+ raise KeyError('Failed to load %s for %s: %s' % (plugin_path, collection, to_native(e)))
+
+ plugin_impl = obj()
+ if plugin_impl is None:
+ raise KeyError('Could not find %s.%s' % (collection, name))
+
+ try:
+ method_map = getattr(plugin_impl, self.method_map_name)
+ plugin_map = method_map().items()
+ except Exception as e:
+ display.warning("Ignoring %s plugins in '%s' as it seems to be invalid: %r" % (self.type, to_text(plugin_path), e))
+ return plugins
+
+ for func_name, func in plugin_map:
+ fq_name = '.'.join((collection, func_name))
+ full = '.'.join((full_name, func_name))
+ pclass = self._load_jinja2_class()
+ plugin = pclass(func)
+ if plugin in plugins:
+ continue
+ self._update_object(plugin, full, plugin_path, resolved=fq_name)
+ plugins.append(plugin)
+
+ return plugins
+
+ def get_with_context(self, name, *args, **kwargs):
+
+ # found_in_cache = True
+ class_only = kwargs.pop('class_only', False) # just pop it, don't want to pass it through
+ collection_list = kwargs.pop('collection_list', None)
+
+ context = PluginLoadContext()
+
+ # avoid collection path for legacy
+ name = name.removeprefix('ansible.legacy.')
+
+ if '.' not in name:
+ # Filters/tests must always be FQCN, except for builtin and legacy plugins
+ for known_plugin in self.all(*args, **kwargs):
+ if known_plugin.matches_name([name]):
+ context.resolved = True
+ context.plugin_resolved_name = name
+ context.plugin_resolved_path = known_plugin._original_path
+ context.plugin_resolved_collection = 'ansible.builtin' if known_plugin.ansible_name.startswith('ansible.builtin.') else ''
+ context._resolved_fqcn = known_plugin.ansible_name
+ return get_with_context_result(known_plugin, context)
+
+ plugin = None
+ key, leaf_key = get_fqcr_and_name(name)
+ seen = set()
+
+ # follow the meta!
+ while True:
+
+ if key in seen:
+ raise AnsibleError('recursive collection redirect found for %r' % name, 0)
+ seen.add(key)
+
+ acr = AnsibleCollectionRef.try_parse_fqcr(key, self.type)
+ if not acr:
+ raise KeyError('invalid plugin name: {0}'.format(key))
+
+ try:
+ ts = _get_collection_metadata(acr.collection)
+ except ValueError as e:
+ # no collection
+ raise KeyError('Invalid plugin FQCN ({0}): {1}'.format(key, to_native(e)))
+
+ # TODO: implement cycle detection (unified across collection redir as well)
+ routing_entry = ts.get('plugin_routing', {}).get(self.type, {}).get(leaf_key, {})
+
+ # check deprecations
+ deprecation_entry = routing_entry.get('deprecation')
+ if deprecation_entry:
+ warning_text = deprecation_entry.get('warning_text')
+ removal_date = deprecation_entry.get('removal_date')
+ removal_version = deprecation_entry.get('removal_version')
+
+ if not warning_text:
+ warning_text = '{0} "{1}" is deprecated'.format(self.type, key)
+
+ display.deprecated(warning_text, version=removal_version, date=removal_date, collection_name=acr.collection)
+
+ # check removal
+ tombstone_entry = routing_entry.get('tombstone')
+ if tombstone_entry:
+ warning_text = tombstone_entry.get('warning_text')
+ removal_date = tombstone_entry.get('removal_date')
+ removal_version = tombstone_entry.get('removal_version')
+
+ if not warning_text:
+ warning_text = '{0} "{1}" has been removed'.format(self.type, key)
+
+ exc_msg = display.get_deprecation_message(warning_text, version=removal_version, date=removal_date,
+ collection_name=acr.collection, removed=True)
+
+ raise AnsiblePluginRemovedError(exc_msg)
+
+ # check redirects
+ redirect = routing_entry.get('redirect', None)
+ if redirect:
+ if not AnsibleCollectionRef.is_valid_fqcr(redirect):
+ raise AnsibleError(
+ f"Collection {acr.collection} contains invalid redirect for {acr.collection}.{acr.resource}: {redirect}. "
+ "Redirects must use fully qualified collection names."
+ )
+
+ next_key, leaf_key = get_fqcr_and_name(redirect, collection=acr.collection)
+ display.vvv('redirecting (type: {0}) {1}.{2} to {3}'.format(self.type, acr.collection, acr.resource, next_key))
+ key = next_key
+ else:
+ break
+
+ try:
+ pkg = import_module(acr.n_python_package_name)
+ except ImportError as e:
+ raise KeyError(to_native(e))
+
+ parent_prefix = acr.collection
+ if acr.subdirs:
+ parent_prefix = '{0}.{1}'.format(parent_prefix, acr.subdirs)
+
+ try:
+ for dummy, module_name, ispkg in pkgutil.iter_modules(pkg.__path__, prefix=parent_prefix + '.'):
+ if ispkg:
+ continue
+
+ try:
+ # use 'parent' loader class to find files, but cannot return this as it can contain
+ # multiple plugins per file
+ plugin_impl = super(Jinja2Loader, self).get_with_context(module_name, *args, **kwargs)
+ except Exception as e:
+ raise KeyError(to_native(e))
+
+ try:
+ method_map = getattr(plugin_impl.object, self.method_map_name)
+ plugin_map = method_map().items()
+ except Exception as e:
+ display.warning("Skipping %s plugins in '%s' as it seems to be invalid: %r" % (self.type, to_text(plugin_impl.object._original_path), e))
+ continue
+
+ for func_name, func in plugin_map:
+ fq_name = '.'.join((parent_prefix, func_name))
+ src_name = f"ansible_collections.{acr.collection}.plugins.{self.type}.{acr.subdirs}.{func_name}"
+ # TODO: load anyway into CACHE so we only match each at end of loop
+ # the files themselves should already be cached by the base class's caching of (python) modules
+ if key in (func_name, fq_name):
+ pclass = self._load_jinja2_class()
+ plugin = pclass(func)
+ if plugin:
+ context = plugin_impl.plugin_load_context
+ self._update_object(plugin, src_name, plugin_impl.object._original_path, resolved=fq_name)
+ break # go to next file as it can override if dupe (don't break both loops)
+
+ except AnsiblePluginRemovedError as apre:
+ raise AnsibleError(to_native(apre), 0, orig_exc=apre)
+ except (AnsibleError, KeyError):
+ raise
+ except Exception as ex:
+ display.warning('An unexpected error occurred during Jinja2 plugin loading: {0}'.format(to_native(ex)))
+ display.vvv('Unexpected error during Jinja2 plugin loading: {0}'.format(format_exc()))
+ raise AnsibleError(to_native(ex), 0, orig_exc=ex)
+
+ return get_with_context_result(plugin, context)
+
+ def all(self, *args, **kwargs):
+
+ # inputs: any '_dedupe' kwarg is ignored since this loader always dedupes by plugin
+ # name; the base class all() is only used to find the files that hold the plugins
+ path_only = kwargs.pop('path_only', False)
+ class_only = kwargs.pop('class_only', False) # basically ignored for test/filters since they are functions
+
+ # Having both path_only and class_only is a coding bug
+ if path_only and class_only:
+ raise AnsibleError('Do not set both path_only and class_only when calling PluginLoader.all()')
+
+ found = set()
+ # get plugins from files in configured paths (multiple in each)
+ for p_map in self._j2_all_file_maps(*args, **kwargs):
+
+ # p_map is really object from file with class that holds multiple plugins
+ plugins_list = getattr(p_map, self.method_map_name)
+ try:
+ plugins = plugins_list()
+ except Exception as e:
+ display.vvvv("Skipping %s plugins in '%s' as it seems to be invalid: %r" % (self.type, to_text(p_map._original_path), e))
+ continue
+
+ for plugin_name in plugins.keys():
+ if plugin_name in _PLUGIN_FILTERS[self.package]:
+ display.debug("%s skipped due to a defined plugin filter" % plugin_name)
+ continue
+
+ if plugin_name in found:
+ display.debug("%s skipped as duplicate" % plugin_name)
+ continue
+
+ if path_only:
+ result = p_map._original_path
+ else:
+ # loader class is for the file with multiple plugins, but each plugin now has its own class
+ pclass = self._load_jinja2_class()
+ result = pclass(plugins[plugin_name]) # if bad plugin, let exception rise
+ found.add(plugin_name)
+ fqcn = plugin_name
+ collection = '.'.join(p_map.ansible_name.split('.')[:2]) if p_map.ansible_name.count('.') >= 2 else ''
+ if not plugin_name.startswith(collection):
+ fqcn = f"{collection}.{plugin_name}"
+
+ self._update_object(result, plugin_name, p_map._original_path, resolved=fqcn)
+ yield result
+
+ def _load_jinja2_class(self):
+ """ override the normal method of plugin classname as these are used in the generic funciton
+ to access the 'multimap' of filter/tests to function, this is a 'singular' plugin for
+ each entry.
+ """
+ class_name = 'AnsibleJinja2%s' % get_plugin_class(self.class_name).capitalize()
+ module = __import__(self.package, fromlist=[class_name])
+
+ return getattr(module, class_name)
+
+ def _j2_all_file_maps(self, *args, **kwargs):
+ """
+ * Unlike other plugin types, file != plugin, a file can contain multiple plugins (of same type).
+ This is why we do not deduplicate ansible file names at this point; we mostly care about
+ the names of the actual jinja2 plugins which are inside our files.
+ * This method will NOT fetch collection plugin files, only those that would be expected under 'ansible.builtin/legacy'.
+ """
+ # populate cache if needed
+ if not self._loaded_j2_file_maps:
+
+ # We don't deduplicate ansible file names.
+ # Instead, calling code deduplicates jinja2 plugin names when loading each file.
+ kwargs['_dedupe'] = False
+
+ # To match correct precedence, call base class' all() to get a list of files,
+ self._loaded_j2_file_maps = list(super(Jinja2Loader, self).all(*args, **kwargs))
+
+ return self._loaded_j2_file_maps
+
+
+def get_fqcr_and_name(resource, collection='ansible.builtin'):
+ if '.' not in resource:
+ name = resource
+ fqcr = collection + '.' + resource
+ else:
+ name = resource.split('.')[-1]
+ fqcr = resource
+
+ return fqcr, name
+
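+# e.g. (sketch):
+#
+#     get_fqcr_and_name('split')          # -> ('ansible.builtin.split', 'split')
+#     get_fqcr_and_name('ns.coll.split')  # -> ('ns.coll.split', 'split')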
+
+def _load_plugin_filter():
+ filters = defaultdict(frozenset)
+ user_set = False
+ if C.PLUGIN_FILTERS_CFG is None:
+ filter_cfg = '/etc/ansible/plugin_filters.yml'
+ else:
+ filter_cfg = C.PLUGIN_FILTERS_CFG
+ user_set = True
+
+ if os.path.exists(filter_cfg):
+ with open(filter_cfg, 'rb') as f:
+ try:
+ filter_data = from_yaml(f.read())
+ except Exception as e:
+ display.warning(u'The plugin filter file, {0} was not parsable.'
+ u' Skipping: {1}'.format(filter_cfg, to_text(e)))
+ return filters
+
+ try:
+ version = filter_data['filter_version']
+ except KeyError:
+ display.warning(u'The plugin filter file, {0} was invalid.'
+ u' Skipping.'.format(filter_cfg))
+ return filters
+
+ # Try to convert for people specifying version as a float instead of string
+ version = to_text(version)
+ version = version.strip()
+
+ if version == u'1.0':
+ # Modules and action plugins share the same blacklist since the difference between the
+ # two isn't visible to the users
+ try:
+ # reject list was documented but we never changed the code from blacklist, will be deprecated in 2.15
+ filters['ansible.modules'] = frozenset(filter_data.get('module_rejectlist', filter_data['module_blacklist']))
+ except TypeError:
+ display.warning(u'Unable to parse the plugin filter file {0} as'
+ u' module_blacklist is not a list.'
+ u' Skipping.'.format(filter_cfg))
+ return filters
+ filters['ansible.plugins.action'] = filters['ansible.modules']
+ else:
+ display.warning(u'The plugin filter file, {0} was a version not recognized by this'
+ u' version of Ansible. Skipping.'.format(filter_cfg))
+ else:
+ if user_set:
+ display.warning(u'The plugin filter file, {0} does not exist.'
+ u' Skipping.'.format(filter_cfg))
+
+ # Special-case the stat module as Ansible can run very few things if stat is blacklisted.
+ if 'stat' in filters['ansible.modules']:
+ raise AnsibleError('The stat module was specified in the module blacklist file, {0}, but'
+ ' Ansible will not function without the stat module. Please remove stat'
+ ' from the blacklist.'.format(to_native(filter_cfg)))
+ return filters
+
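+# The filter file this parser accepts looks like the following YAML (a sketch;
+# the module names are illustrative, 'module_blacklist' is the key the code reads):
+#
+#     filter_version: '1.0'
+#     module_blacklist:
+#       - ec2_facts
+#       - docker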
+
+# since we don't want the actual collection loader understanding metadata, we'll do it in an event handler
+def _on_collection_load_handler(collection_name, collection_path):
+ display.vvvv(to_text('Loading collection {0} from {1}'.format(collection_name, collection_path)))
+
+ collection_meta = _get_collection_metadata(collection_name)
+
+ try:
+ if not _does_collection_support_ansible_version(collection_meta.get('requires_ansible', ''), ansible_version):
+ mismatch_behavior = C.config.get_config_value('COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH')
+ message = 'Collection {0} does not support Ansible version {1}'.format(collection_name, ansible_version)
+ if mismatch_behavior == 'warning':
+ display.warning(message)
+ elif mismatch_behavior == 'error':
+ raise AnsibleCollectionUnsupportedVersionError(message)
+ except AnsibleError:
+ raise
+ except Exception as ex:
+ display.warning('Error parsing collection metadata requires_ansible value from collection {0}: {1}'.format(collection_name, ex))
+
+
+def _does_collection_support_ansible_version(requirement_string, ansible_version):
+ if not requirement_string:
+ return True
+
+ if not SpecifierSet:
+ display.warning('packaging Python module unavailable; unable to validate collection Ansible version requirements')
+ return True
+
+ ss = SpecifierSet(requirement_string)
+
+ # ignore prerelease/postrelease/beta/dev flags for simplicity
+ base_ansible_version = Version(ansible_version).base_version
+
+ return ss.contains(base_ansible_version)
+
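+# e.g. (sketch):
+#
+#     _does_collection_support_ansible_version('>=2.9,<2.15', '2.14.1')  # True
+#     _does_collection_support_ansible_version('>=2.15', '2.14.1')       # False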
+
+def _configure_collection_loader():
+ if AnsibleCollectionConfig.collection_finder:
+ # this must be a Python warning so that it can be filtered out by the import sanity test
+ warnings.warn('AnsibleCollectionFinder has already been configured')
+ return
+
+ finder = _AnsibleCollectionFinder(C.COLLECTIONS_PATHS, C.COLLECTIONS_SCAN_SYS_PATH)
+ finder._install()
+
+ # this should succeed now
+ AnsibleCollectionConfig.on_collection_load += _on_collection_load_handler
+
+
+# TODO: All of the following is initialization code. It should be moved inside of an initialization
+# function which is called at some point early in the ansible and ansible-playbook CLI startup.
+
+_PLUGIN_FILTERS = _load_plugin_filter()
+
+_configure_collection_loader()
+
+# doc fragments first
+fragment_loader = PluginLoader(
+ 'ModuleDocFragment',
+ 'ansible.plugins.doc_fragments',
+ C.DOC_FRAGMENT_PLUGIN_PATH,
+ 'doc_fragments',
+)
+
+action_loader = PluginLoader(
+ 'ActionModule',
+ 'ansible.plugins.action',
+ C.DEFAULT_ACTION_PLUGIN_PATH,
+ 'action_plugins',
+ required_base_class='ActionBase',
+)
+
+cache_loader = PluginLoader(
+ 'CacheModule',
+ 'ansible.plugins.cache',
+ C.DEFAULT_CACHE_PLUGIN_PATH,
+ 'cache_plugins',
+)
+
+callback_loader = PluginLoader(
+ 'CallbackModule',
+ 'ansible.plugins.callback',
+ C.DEFAULT_CALLBACK_PLUGIN_PATH,
+ 'callback_plugins',
+)
+
+connection_loader = PluginLoader(
+ 'Connection',
+ 'ansible.plugins.connection',
+ C.DEFAULT_CONNECTION_PLUGIN_PATH,
+ 'connection_plugins',
+ aliases={'paramiko': 'paramiko_ssh'},
+ required_base_class='ConnectionBase',
+)
+
+shell_loader = PluginLoader(
+ 'ShellModule',
+ 'ansible.plugins.shell',
+ 'shell_plugins',
+ 'shell_plugins',
+)
+
+module_loader = PluginLoader(
+ '',
+ 'ansible.modules',
+ C.DEFAULT_MODULE_PATH,
+ 'library',
+)
+
+module_utils_loader = PluginLoader(
+ '',
+ 'ansible.module_utils',
+ C.DEFAULT_MODULE_UTILS_PATH,
+ 'module_utils',
+)
+
+# NB: dedicated loader is currently necessary because PS module_utils expects "with subdir" lookup where
+# regular module_utils doesn't. This can be revisited once we have more granular loaders.
+ps_module_utils_loader = PluginLoader(
+ '',
+ 'ansible.module_utils',
+ C.DEFAULT_MODULE_UTILS_PATH,
+ 'module_utils',
+)
+
+lookup_loader = PluginLoader(
+ 'LookupModule',
+ 'ansible.plugins.lookup',
+ C.DEFAULT_LOOKUP_PLUGIN_PATH,
+ 'lookup_plugins',
+ required_base_class='LookupBase',
+)
+
+filter_loader = Jinja2Loader(
+ 'FilterModule',
+ 'ansible.plugins.filter',
+ C.DEFAULT_FILTER_PLUGIN_PATH,
+ 'filter_plugins',
+)
+
+test_loader = Jinja2Loader(
+ 'TestModule',
+ 'ansible.plugins.test',
+ C.DEFAULT_TEST_PLUGIN_PATH,
+ 'test_plugins'
+)
+
+strategy_loader = PluginLoader(
+ 'StrategyModule',
+ 'ansible.plugins.strategy',
+ C.DEFAULT_STRATEGY_PLUGIN_PATH,
+ 'strategy_plugins',
+ required_base_class='StrategyBase',
+)
+
+terminal_loader = PluginLoader(
+ 'TerminalModule',
+ 'ansible.plugins.terminal',
+ C.DEFAULT_TERMINAL_PLUGIN_PATH,
+ 'terminal_plugins',
+ required_base_class='TerminalBase'
+)
+
+vars_loader = PluginLoader(
+ 'VarsModule',
+ 'ansible.plugins.vars',
+ C.DEFAULT_VARS_PLUGIN_PATH,
+ 'vars_plugins',
+)
+
+cliconf_loader = PluginLoader(
+ 'Cliconf',
+ 'ansible.plugins.cliconf',
+ C.DEFAULT_CLICONF_PLUGIN_PATH,
+ 'cliconf_plugins',
+ required_base_class='CliconfBase'
+)
+
+netconf_loader = PluginLoader(
+ 'Netconf',
+ 'ansible.plugins.netconf',
+ C.DEFAULT_NETCONF_PLUGIN_PATH,
+ 'netconf_plugins',
+ required_base_class='NetconfBase'
+)
+
+inventory_loader = PluginLoader(
+ 'InventoryModule',
+ 'ansible.plugins.inventory',
+ C.DEFAULT_INVENTORY_PLUGIN_PATH,
+ 'inventory_plugins'
+)
+
+httpapi_loader = PluginLoader(
+ 'HttpApi',
+ 'ansible.plugins.httpapi',
+ C.DEFAULT_HTTPAPI_PLUGIN_PATH,
+ 'httpapi_plugins',
+ required_base_class='HttpApiBase',
+)
+
+become_loader = PluginLoader(
+ 'BecomeModule',
+ 'ansible.plugins.become',
+ C.BECOME_PLUGIN_PATH,
+ 'become_plugins'
+)
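+
+# With the loaders above in place, plugin resolution is a one-liner (sketch;
+# assumes the named builtin plugins exist on this install):
+#
+#     lookup_loader.get('env')     # instance of the builtin env lookup
+#     become_loader.get('sudo')    # instance of the builtin sudo become plugin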
diff --git a/lib/ansible/plugins/lookup/__init__.py b/lib/ansible/plugins/lookup/__init__.py
new file mode 100644
index 0000000..470f060
--- /dev/null
+++ b/lib/ansible/plugins/lookup/__init__.py
@@ -0,0 +1,130 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import abstractmethod
+
+from ansible.errors import AnsibleFileNotFound
+from ansible.plugins import AnsiblePlugin
+from ansible.utils.display import Display
+
+display = Display()
+
+__all__ = ['LookupBase']
+
+
+class LookupBase(AnsiblePlugin):
+
+ def __init__(self, loader=None, templar=None, **kwargs):
+
+ super(LookupBase, self).__init__()
+
+ self._loader = loader
+ self._templar = templar
+
+ # Backwards compat: self._display isn't really needed, just import the global display and use that.
+ self._display = display
+
+ def get_basedir(self, variables):
+ if 'role_path' in variables:
+ return variables['role_path']
+ else:
+ return self._loader.get_basedir()
+
+ @staticmethod
+ def _flatten(terms):
+ ret = []
+ for term in terms:
+ if isinstance(term, (list, tuple)):
+ ret.extend(term)
+ else:
+ ret.append(term)
+ return ret
+
+ @staticmethod
+ def _combine(a, b):
+ results = []
+ for x in a:
+ for y in b:
+ results.append(LookupBase._flatten([x, y]))
+ return results
+
+ @staticmethod
+ def _flatten_hash_to_list(terms):
+ ret = []
+ for key in terms:
+ ret.append({'key': key, 'value': terms[key]})
+ return ret
+
+ @abstractmethod
+ def run(self, terms, variables=None, **kwargs):
+ """
+ When the playbook specifies a lookup, this method is run. The
+ arguments to the lookup become the arguments to this method. One
+ additional keyword argument named ``variables`` is added to the method
+ call. It contains the variables available to ansible at the time the
+ lookup is templated. For instance::
+
+ "{{ lookup('url', 'https://toshio.fedorapeople.org/one.txt', validate_certs=True) }}"
+
+ would end up calling the lookup plugin named url's run method like this::
+ run(['https://toshio.fedorapeople.org/one.txt'], variables=available_variables, validate_certs=True)
+
+ Lookup plugins can be used within playbooks for looping. When this
+ happens, the first argument is a list containing the terms. Lookup
+ plugins can also be called from within playbooks to return their
+ values into a variable or parameter. If the user passes a string in
+ this case, it is converted into a list.
+
+ Errors encountered during execution should be returned by raising
+ AnsibleError() with a message describing the error.
+
+ Any strings returned by this method that could ever contain non-ascii
+ must be converted into python's unicode type as the strings will be run
+ through jinja2 which has this requirement. You can use::
+
+ from ansible.module_utils._text import to_text
+ result_string = to_text(result_string)
+ """
+ pass
+
+ def find_file_in_search_path(self, myvars, subdir, needle, ignore_missing=False):
+ '''
+ Return a file (needle) in the task's expected search path.
+ '''
+
+ if 'ansible_search_path' in myvars:
+ paths = myvars['ansible_search_path']
+ else:
+ paths = [self.get_basedir(myvars)]
+
+ result = None
+ try:
+ result = self._loader.path_dwim_relative_stack(paths, subdir, needle)
+ except AnsibleFileNotFound:
+ if not ignore_missing:
+ self._display.warning("Unable to find '%s' in expected paths (use -vvvvv to see paths)" % needle)
+
+ return result
+
+ def _deprecate_inline_kv(self):
+ # TODO: place holder to deprecate in future version allowing for long transition period
+ # self._display.deprecated('Passing inline k=v values embedded in a string to this lookup. Use direct ,k=v, k2=v2 syntax instead.', version='2.18')
+ pass
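
Several of the lookups added below lean on the _flatten and _combine helpers defined here. A dependency-free sketch of their behavior (same logic as above, no Ansible imports):

    def flatten(terms):
        # one-level flatten, as in LookupBase._flatten above
        ret = []
        for term in terms:
            if isinstance(term, (list, tuple)):
                ret.extend(term)
            else:
                ret.append(term)
        return ret

    def combine(a, b):
        # pairwise cartesian product, as in LookupBase._combine above
        return [flatten([x, y]) for x in a for y in b]

    print(flatten([1, [2, 3], 4]))      # [1, 2, 3, 4]
    print(combine(['a', 'b'], [1, 2]))  # [['a', 1], ['a', 2], ['b', 1], ['b', 2]]
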
diff --git a/lib/ansible/plugins/lookup/config.py b/lib/ansible/plugins/lookup/config.py
new file mode 100644
index 0000000..3e5529b
--- /dev/null
+++ b/lib/ansible/plugins/lookup/config.py
@@ -0,0 +1,156 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: config
+ author: Ansible Core Team
+ version_added: "2.5"
+ short_description: Lookup current Ansible configuration values
+ description:
+ - Retrieves the value of an Ansible configuration setting.
+ - You can use C(ansible-config list) to see all available settings.
+ options:
+ _terms:
+ description: The key(s) to look up
+ required: True
+ on_missing:
+ description:
+ - action to take if term is missing from config
+ - Error will raise a fatal error
+ - Skip will just ignore the term
+ - Warn will skip over it but issue a warning
+ default: error
+ type: string
+ choices: ['error', 'skip', 'warn']
+ plugin_type:
+ description: the type of the plugin referenced by the 'plugin_name' option.
+ choices: ['become', 'cache', 'callback', 'cliconf', 'connection', 'httpapi', 'inventory', 'lookup', 'netconf', 'shell', 'vars']
+ type: string
+ version_added: '2.12'
+ plugin_name:
+ description: name of the plugin for which you want to retrieve configuration settings.
+ type: string
+ version_added: '2.12'
+"""
+
+EXAMPLES = """
+ - name: Show configured default become user
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.config', 'DEFAULT_BECOME_USER')}}"
+
+ - name: print out role paths
+ ansible.builtin.debug:
+ msg: "These are the configured role paths: {{lookup('ansible.builtin.config', 'DEFAULT_ROLES_PATH')}}"
+
+ - name: find retry files, skip if missing that key
+ ansible.builtin.find:
+ paths: "{{lookup('ansible.builtin.config', 'RETRY_FILES_SAVE_PATH')|default(playbook_dir, True)}}"
+ patterns: "*.retry"
+
+ - name: see the colors
+ ansible.builtin.debug: msg="{{item}}"
+ loop: "{{lookup('ansible.builtin.config', 'COLOR_OK', 'COLOR_CHANGED', 'COLOR_SKIP', wantlist=True)}}"
+
+ - name: skip if bad value in var
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.config', config_in_var, on_missing='skip')}}"
+ vars:
+ config_in_var: UNKNOWN
+
+ - name: show remote user and port for ssh connection
+ ansible.builtin.debug: msg={{q("ansible.builtin.config", "remote_user", "port", plugin_type="connection", plugin_name="ssh", on_missing='skip')}}
+
+ - name: show remote_tmp setting for shell (sh) plugin
+ ansible.builtin.debug: msg={{q("ansible.builtin.config", "remote_tmp", plugin_type="shell", plugin_name="sh")}}
+"""
+
+RETURN = """
+_raw:
+ description:
+ - value(s) of the key(s) in the config
+ type: raw
+"""
+
+import ansible.plugins.loader as plugin_loader
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleLookupError, AnsibleOptionsError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.sentinel import Sentinel
+
+
+class MissingSetting(AnsibleOptionsError):
+ pass
+
+
+def _get_plugin_config(pname, ptype, config, variables):
+ try:
+ # plugin creates settings on load, this is cached so not too expensive to redo
+ loader = getattr(plugin_loader, '%s_loader' % ptype)
+ p = loader.get(pname, class_only=True)
+ if p is None:
+ raise AnsibleLookupError('Unable to load %s plugin "%s"' % (ptype, pname))
+ result = C.config.get_config_value(config, plugin_type=ptype, plugin_name=p._load_name, variables=variables)
+ except AnsibleLookupError:
+ raise
+ except AnsibleError as e:
+ msg = to_native(e)
+ if 'was not defined' in msg:
+ raise MissingSetting(msg, orig_exc=e)
+ raise e
+
+ return result
+
+
+def _get_global_config(config):
+ try:
+ result = getattr(C, config)
+ if callable(result):
+ raise AnsibleLookupError('Invalid setting "%s" attempted' % config)
+ except AttributeError as e:
+ raise MissingSetting(to_native(e), orig_exc=e)
+
+ return result
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ missing = self.get_option('on_missing')
+ ptype = self.get_option('plugin_type')
+ pname = self.get_option('plugin_name')
+
+ if (ptype or pname) and not (ptype and pname):
+ raise AnsibleOptionsError('Both plugin_type and plugin_name are required, cannot use one without the other')
+
+ if not isinstance(missing, string_types) or missing not in ['error', 'warn', 'skip']:
+ raise AnsibleOptionsError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % missing)
+
+ ret = []
+
+ for term in terms:
+ if not isinstance(term, string_types):
+ raise AnsibleOptionsError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))
+
+ result = Sentinel
+ try:
+ if pname:
+ result = _get_plugin_config(pname, ptype, term, variables)
+ else:
+ result = _get_global_config(term)
+ except MissingSetting as e:
+ if missing == 'error':
+ raise AnsibleLookupError('Unable to find setting %s' % term, orig_exc=e)
+ elif missing == 'warn':
+ self._display.warning('Skipping, did not find setting %s' % term)
+ elif missing == 'skip':
+ pass # this is not needed, but added to have all 3 options stated
+
+ if result is not Sentinel:
+ ret.append(result)
+ return ret
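
run() above uses Sentinel to distinguish "no value produced" from legitimately falsy settings. A standalone sketch of that pattern, with a plain object() standing in for ansible.utils.sentinel.Sentinel:

    SENTINEL = object()  # stand-in for ansible.utils.sentinel.Sentinel

    def collect(getter, terms):
        ret = []
        for term in terms:
            result = SENTINEL
            try:
                result = getter(term)
            except KeyError:
                pass  # the 'skip' behavior
            if result is not SENTINEL:
                ret.append(result)
        return ret

    settings = {'DEFAULT_BECOME_USER': '', 'RETRY_FILES_ENABLED': False}
    print(collect(settings.__getitem__, ['DEFAULT_BECOME_USER', 'MISSING', 'RETRY_FILES_ENABLED']))
    # -> ['', False]: falsy values are kept, the missing key is skipped
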
diff --git a/lib/ansible/plugins/lookup/csvfile.py b/lib/ansible/plugins/lookup/csvfile.py
new file mode 100644
index 0000000..5932d77
--- /dev/null
+++ b/lib/ansible/plugins/lookup/csvfile.py
@@ -0,0 +1,181 @@
+# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+ name: csvfile
+ author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+ version_added: "1.5"
+ short_description: read data from a TSV or CSV file
+ description:
+ - The csvfile lookup reads the contents of a file in CSV (comma-separated value) format.
+ The lookup looks for the row where the first column matches keyname (which can be multiple words)
+ and returns the value in the C(col) column (default 1, which indexed from 0 means the second column in the file).
+ options:
+ col:
+ description: column to return (0 indexed).
+ default: "1"
+ default:
+ description: what to return if the value is not found in the file.
+ delimiter:
+ description: field separator in the file, for a tab you can specify C(TAB) or C(\t).
+ default: TAB
+ file:
+ description: name of the CSV/TSV file to open.
+ default: ansible.csv
+ encoding:
+ description: Encoding (character set) of the used CSV file.
+ default: utf-8
+ version_added: "2.1"
+ notes:
+ - The default is for TSV files (tab delimited) not CSV (comma delimited) ... yes the name is misleading.
+ - As of version 2.11, the search parameter (text that must match the first column of the file) and filename parameter can be multi-word.
+ - For historical reasons, in the search keyname, quotes are treated
+ literally and cannot be used around the string unless they appear
+ (escaped as required) in the first column of the file you are parsing.
+"""
+
+EXAMPLES = """
+- name: Match 'Li' on the first column, return the second column (0 based index)
+ ansible.builtin.debug: msg="The atomic number of Lithium is {{ lookup('ansible.builtin.csvfile', 'Li file=elements.csv delimiter=,') }}"
+
+- name: msg="Match 'Li' on the first column, but return the 3rd column (columns start counting after the match)"
+ ansible.builtin.debug: msg="The atomic mass of Lithium is {{ lookup('ansible.builtin.csvfile', 'Li file=elements.csv delimiter=, col=2') }}"
+
+- name: Define Values From CSV File, this reads the file in one go, but you could also use col= to read each value in its own lookup.
+ ansible.builtin.set_fact:
+ loop_ip: "{{ csvline[0] }}"
+ int_ip: "{{ csvline[1] }}"
+ int_mask: "{{ csvline[2] }}"
+ int_name: "{{ csvline[3] }}"
+ local_as: "{{ csvline[4] }}"
+ neighbor_as: "{{ csvline[5] }}"
+ neigh_int_ip: "{{ csvline[6] }}"
+ vars:
+ csvline = "{{ lookup('ansible.builtin.csvfile', bgp_neighbor_ip, file='bgp_neighbors.csv', delimiter=',') }}"
+ delegate_to: localhost
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - value(s) stored in file column
+ type: list
+ elements: str
+"""
+
+import codecs
+import csv
+
+from collections.abc import MutableSequence
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.parsing.splitter import parse_kv
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.six import PY2
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+class CSVRecoder:
+ """
+ Iterator that reads an encoded stream and reencodes the input to UTF-8
+ """
+ def __init__(self, f, encoding='utf-8'):
+ self.reader = codecs.getreader(encoding)(f)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return next(self.reader).encode("utf-8")
+
+ next = __next__ # For Python 2
+
+
+class CSVReader:
+ """
+ A CSV reader which will iterate over lines in the CSV file "f",
+ which is encoded in the given encoding.
+ """
+
+ def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
+ if PY2:
+ f = CSVRecoder(f, encoding)
+ else:
+ f = codecs.getreader(encoding)(f)
+
+ self.reader = csv.reader(f, dialect=dialect, **kwds)
+
+ def __next__(self):
+ row = next(self.reader)
+ return [to_text(s) for s in row]
+
+ next = __next__ # For Python 2
+
+ def __iter__(self):
+ return self
+
+
+class LookupModule(LookupBase):
+
+ def read_csv(self, filename, key, delimiter, encoding='utf-8', dflt=None, col=1):
+
+ try:
+ # use a context manager so the file handle is always closed
+ with open(to_bytes(filename), 'rb') as f:
+ creader = CSVReader(f, delimiter=to_native(delimiter), encoding=encoding)
+
+ for row in creader:
+ if len(row) and row[0] == key:
+ return row[int(col)]
+ except Exception as e:
+ raise AnsibleError("csvfile: %s" % to_native(e))
+
+ return dflt
+
+ def run(self, terms, variables=None, **kwargs):
+
+ ret = []
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ # populate options
+ paramvals = self.get_options()
+
+ for term in terms:
+ kv = parse_kv(term)
+
+ if '_raw_params' not in kv:
+ raise AnsibleError('Search key is required but was not found')
+
+ key = kv['_raw_params']
+
+ # parameters override per term using k/v
+ try:
+ for name, value in kv.items():
+ if name == '_raw_params':
+ continue
+ if name not in paramvals:
+ raise AnsibleAssertionError('%s is not a valid option' % name)
+
+ self._deprecate_inline_kv()
+ paramvals[name] = value
+
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+
+ # default is just placeholder for real tab
+ if paramvals['delimiter'] == 'TAB':
+ paramvals['delimiter'] = "\t"
+
+ lookupfile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
+ var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['encoding'], paramvals['default'], paramvals['col'])
+ if var is not None:
+ if isinstance(var, MutableSequence):
+ for v in var:
+ ret.append(v)
+ else:
+ ret.append(var)
+
+ return ret
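
The core of read_csv() above is a first-column match. A minimal stdlib-only sketch of the same idea; read_csv_value and its parameters are illustrative names:

    import csv
    import io

    def read_csv_value(text, key, delimiter=',', col=1, dflt=None):
        # return column `col` of the first row whose first column equals `key`
        for row in csv.reader(io.StringIO(text), delimiter=delimiter):
            if row and row[0] == key:
                return row[int(col)]
        return dflt

    data = 'Li,3,6.94\nNa,11,22.99\n'
    print(read_csv_value(data, 'Li'))           # '3'
    print(read_csv_value(data, 'K', dflt='?'))  # '?'
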
diff --git a/lib/ansible/plugins/lookup/dict.py b/lib/ansible/plugins/lookup/dict.py
new file mode 100644
index 0000000..af9a081
--- /dev/null
+++ b/lib/ansible/plugins/lookup/dict.py
@@ -0,0 +1,77 @@
+# (c) 2014, Kent R. Spillner <kspillner@acm.org>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: dict
+ version_added: "1.5"
+ short_description: returns key/value pair items from dictionaries
+ description:
+ - Takes dictionaries as input and returns a list in which each item is a dictionary with 'key' and 'value'
+ keys mapping to the entries of the original dictionary.
+ options:
+ _terms:
+ description:
+ - A list of dictionaries
+ required: True
+"""
+
+EXAMPLES = """
+vars:
+ users:
+ alice:
+ name: Alice Appleworth
+ telephone: 123-456-7890
+ bob:
+ name: Bob Bananarama
+ telephone: 987-654-3210
+tasks:
+ # with predefined vars
+ - name: Print phone records
+ ansible.builtin.debug:
+ msg: "User {{ item.key }} is {{ item.value.name }} ({{ item.value.telephone }})"
+ loop: "{{ lookup('ansible.builtin.dict', users) }}"
+ # with inline dictionary
+ - name: show dictionary
+ ansible.builtin.debug:
+ msg: "{{item.key}}: {{item.value}}"
+ with_dict: {a: 1, b: 2, c: 3}
+ # Items from loop can be used in when: statements
+ - name: set_fact when alice in key
+ ansible.builtin.set_fact:
+ alice_exists: true
+ loop: "{{ lookup('ansible.builtin.dict', users) }}"
+ when: "'alice' in item.key"
+"""
+
+RETURN = """
+ _list:
+ description:
+ - list of composed dictionaries with key and value
+ type: list
+"""
+
+from collections.abc import Mapping
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ # NOTE: can remove if with_ is removed
+ if not isinstance(terms, list):
+ terms = [terms]
+
+ results = []
+ for term in terms:
+ # Expect any type of Mapping, notably hostvars
+ if not isinstance(term, Mapping):
+ raise AnsibleError("with_dict expects a dict")
+
+ results.extend(self._flatten_hash_to_list(term))
+ return results
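
The dict lookup is essentially a thin wrapper over the _flatten_hash_to_list() helper from LookupBase; this standalone sketch shows the shape of the output:

    def flatten_hash_to_list(terms):
        # same transformation as LookupBase._flatten_hash_to_list above
        return [{'key': k, 'value': v} for k, v in terms.items()]

    users = {'alice': {'name': 'Alice Appleworth'}, 'bob': {'name': 'Bob Bananarama'}}
    for item in flatten_hash_to_list(users):
        print(item['key'], '->', item['value']['name'])
    # alice -> Alice Appleworth
    # bob -> Bob Bananarama
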
diff --git a/lib/ansible/plugins/lookup/env.py b/lib/ansible/plugins/lookup/env.py
new file mode 100644
index 0000000..3c37b90
--- /dev/null
+++ b/lib/ansible/plugins/lookup/env.py
@@ -0,0 +1,79 @@
+# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: env
+ author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+ version_added: "0.9"
+ short_description: Read the value of environment variables
+ description:
+ - Allows you to query the environment variables available on the
+ controller when you invoked Ansible.
+ options:
+ _terms:
+ description:
+ - Environment variable or list of them to lookup the values for.
+ required: True
+ default:
+ description: What to return when the variable is undefined
+ type: raw
+ default: ''
+ version_added: '2.13'
+ notes:
+ - You can pass the C(Undefined) object as C(default) to force an undefined error
+"""
+
+EXAMPLES = """
+- name: Basic usage
+ ansible.builtin.debug:
+ msg: "'{{ lookup('ansible.builtin.env', 'HOME') }}' is the HOME environment variable."
+
+- name: Before 2.13, how to set a default value if the variable is not defined. This cannot distinguish between USR undefined and USR=''.
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.env', 'USR')|default('nobody', True) }} is the user."
+
+- name: Example of how to set a default value if the variable is not defined; this ignores USR=''
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.env', 'USR', default='nobody') }} is the user."
+
+- name: Set default value to Undefined, if the variable is not defined
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.env', 'USR', default=Undefined) }} is the user."
+
+- name: Set default value to undef(), if the variable is not defined
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.env', 'USR', default=undef()) }} is the user."
+"""
+
+RETURN = """
+ _list:
+ description:
+ - Values from the environment variables.
+ type: list
+"""
+
+from jinja2.runtime import Undefined
+
+from ansible.errors import AnsibleUndefinedVariable
+from ansible.plugins.lookup import LookupBase
+from ansible.utils import py3compat
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ ret = []
+ d = self.get_option('default')
+ for term in terms:
+ var = term.split()[0]
+ val = py3compat.environ.get(var, d)
+ if isinstance(val, Undefined):
+ raise AnsibleUndefinedVariable('The "env" lookup, found an undefined variable: %s' % var)
+ ret.append(val)
+ return ret
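
The default handling in run() above reduces to an environ lookup with a caller-supplied default, where an "undefined" marker turns an unset variable into a hard error. A rough sketch, with a plain marker object in place of jinja2's Undefined:

    import os

    UNDEFINED = object()  # crude stand-in for jinja2.runtime.Undefined

    def env_lookup(name, default=''):
        val = os.environ.get(name, default)
        if val is UNDEFINED:
            raise LookupError('environment variable %r is not set' % name)
        return val

    print(env_lookup('USR', default='nobody'))  # 'nobody' if USR is unset
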
diff --git a/lib/ansible/plugins/lookup/file.py b/lib/ansible/plugins/lookup/file.py
new file mode 100644
index 0000000..fa9191e
--- /dev/null
+++ b/lib/ansible/plugins/lookup/file.py
@@ -0,0 +1,88 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: file
+ author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
+ version_added: "0.9"
+ short_description: read file contents
+ description:
+ - This lookup returns the contents from a file on the Ansible controller's file system.
+ options:
+ _terms:
+ description: path(s) of files to read
+ required: True
+ rstrip:
+ description: whether or not to remove whitespace from the ending of the looked-up file
+ type: bool
+ required: False
+ default: True
+ lstrip:
+ description: whether or not to remove whitespace from the beginning of the looked-up file
+ type: bool
+ required: False
+ default: False
+ notes:
+ - if read in variable context, the file can be interpreted as YAML if the content is valid to the parser.
+ - this lookup does not understand 'globbing', use the fileglob lookup instead.
+"""
+
+EXAMPLES = """
+- ansible.builtin.debug:
+ msg: "the value of foo.txt is {{lookup('ansible.builtin.file', '/etc/foo.txt') }}"
+
+- name: display multiple file contents
+ ansible.builtin.debug: var=item
+ with_file:
+ - "/path/to/foo.txt"
+ - "bar.txt" # will be looked in files/ dir relative to play or in role
+ - "/path/to/biz.txt"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - content of file(s)
+ type: list
+ elements: str
+"""
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_text
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ ret = []
+ self.set_options(var_options=variables, direct=kwargs)
+
+ for term in terms:
+ display.debug("File lookup term: %s" % term)
+
+ # Find the file in the expected search path
+ lookupfile = self.find_file_in_search_path(variables, 'files', term)
+ display.vvvv(u"File lookup using %s as file" % lookupfile)
+ try:
+ if lookupfile:
+ b_contents, show_data = self._loader._get_file_contents(lookupfile)
+ contents = to_text(b_contents, errors='surrogate_or_strict')
+ if self.get_option('lstrip'):
+ contents = contents.lstrip()
+ if self.get_option('rstrip'):
+ contents = contents.rstrip()
+ ret.append(contents)
+ else:
+ raise AnsibleParserError()
+ except AnsibleParserError:
+ raise AnsibleError("could not locate file in lookup: %s" % term)
+
+ return ret
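
find_file_in_search_path() (defined in the lookup base earlier in this diff) walks the task's search path, and _loader.path_dwim_relative_stack() checks each path both under the subdir and directly. A simplified sketch of that first-match walk; the real resolver is role/play aware and does more:

    import os

    def find_in_search_path(paths, subdir, needle):
        # try <path>/<subdir>/<needle> first, then <path>/<needle>; first hit wins
        for base in paths:
            for candidate in (os.path.join(base, subdir, needle),
                              os.path.join(base, needle)):
                if os.path.exists(candidate):
                    return candidate
        return None

    print(find_in_search_path(['/etc/ansible/roles/web', '.'], 'files', 'foo.txt'))
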
diff --git a/lib/ansible/plugins/lookup/fileglob.py b/lib/ansible/plugins/lookup/fileglob.py
new file mode 100644
index 0000000..abf8202
--- /dev/null
+++ b/lib/ansible/plugins/lookup/fileglob.py
@@ -0,0 +1,84 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: fileglob
+ author: Michael DeHaan
+ version_added: "1.4"
+ short_description: list files matching a pattern
+ description:
+ - Matches all files in a single directory, non-recursively, that match a pattern.
+ It calls Python's "glob" library.
+ options:
+ _terms:
+ description: path(s) of files to read
+ required: True
+ notes:
+ - Patterns are only supported on files, not on directories or paths.
+ - See R(Ansible task paths,playbook_task_paths) to understand how file lookup occurs with paths.
+ - Matching is against local system files on the Ansible controller.
+ To iterate a list of files on a remote node, use the M(ansible.builtin.find) module.
+ - Returns a comma-joined string of matched paths, or an empty list if no files match. For a 'true list' pass C(wantlist=True) to the lookup.
+"""
+
+EXAMPLES = """
+- name: Display paths of all .txt files in dir
+ ansible.builtin.debug: msg={{ lookup('ansible.builtin.fileglob', '/my/path/*.txt') }}
+
+- name: Copy each file over that matches the given pattern
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: "/etc/fooapp/"
+ owner: "root"
+ mode: 0600
+ with_fileglob:
+ - "/playbooks/files/fooapp/*"
+"""
+
+RETURN = """
+ _list:
+ description:
+ - list of files
+ type: list
+ elements: path
+"""
+
+import os
+import glob
+
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleFileNotFound
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ ret = []
+ for term in terms:
+ term_file = os.path.basename(term)
+ found_paths = []
+ if term_file != term:
+ found_paths.append(self.find_file_in_search_path(variables, 'files', os.path.dirname(term)))
+ else:
+ # no dir, just file, so use paths and 'files' paths instead
+ if 'ansible_search_path' in variables:
+ paths = variables['ansible_search_path']
+ else:
+ paths = [self.get_basedir(variables)]
+ for p in paths:
+ found_paths.append(os.path.join(p, 'files'))
+ found_paths.append(p)
+
+ for dwimmed_path in found_paths:
+ if dwimmed_path:
+ globbed = glob.glob(to_bytes(os.path.join(dwimmed_path, term_file), errors='surrogate_or_strict'))
+ term_results = [to_text(g, errors='surrogate_or_strict') for g in globbed if os.path.isfile(g)]
+ if term_results:
+ ret.extend(term_results)
+ break
+ return ret
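
A condensed sketch of the strategy in run() above: glob only the basename under each candidate directory and stop at the first directory that yields matches. The fileglob name here is illustrative:

    import glob
    import os

    def fileglob(dirs, pattern):
        for d in dirs:
            matches = [p for p in glob.glob(os.path.join(d, pattern)) if os.path.isfile(p)]
            if matches:
                return matches  # first directory with hits wins
        return []

    print(fileglob(['files', '.'], '*.txt'))
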
diff --git a/lib/ansible/plugins/lookup/first_found.py b/lib/ansible/plugins/lookup/first_found.py
new file mode 100644
index 0000000..5b94b10
--- /dev/null
+++ b/lib/ansible/plugins/lookup/first_found.py
@@ -0,0 +1,235 @@
+# (c) 2013, seth vidal <skvidal@fedoraproject.org> red hat, inc
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: first_found
+ author: Seth Vidal (!UNKNOWN) <skvidal@fedoraproject.org>
+ version_added: historical
+ short_description: return first file found from list
+ description:
+ - This lookup checks a list of files and paths and returns the full path to the first combination found.
+ - As with all lookups, when fed relative paths it will try to use the current task's location first and go up the chain
+ to the containing locations of role / play / include and so on.
+ - The list of files has precedence over the paths searched.
+ For example, if a task in a role has a 'file1' in the play's relative path, it will be used; a 'file2' in the role's relative path will not.
+ - Either a list of files C(_terms) or a key C(files) with a list of files is required for this plugin to operate.
+ notes:
+ - This lookup can be used in 'dual mode', either passing a list of file names or a dictionary that has C(files) and C(paths).
+ options:
+ _terms:
+ description: A list of file names.
+ files:
+ description: A list of file names.
+ type: list
+ elements: string
+ default: []
+ paths:
+ description: A list of paths in which to look for the files.
+ type: list
+ elements: string
+ default: []
+ skip:
+ type: boolean
+ default: False
+ description:
+ - When C(True), return an empty list when no files are matched.
+ - This is useful when used with C(with_first_found), as an empty list return to C(with_) calls
+ causes the calling task to be skipped.
+ - When used as a template via C(lookup) or C(query), setting I(skip=True) will *not* cause the task to skip.
+ Tasks must handle the empty list return from the template.
+ - When C(False) and C(lookup) or C(query) specifies I(errors='ignore') all errors (including no file found,
+ but potentially others) return an empty string or an empty list respectively.
+ - When C(True) and C(lookup) or C(query) specifies I(errors='ignore'), no file found will return an empty
+ list and other potential errors return an empty string or empty list depending on the template call
+ (in other words return values of C(lookup) v C(query)).
+"""
+
+EXAMPLES = """
+- name: Set _found_file to the first existing file, raising an error if a file is not found
+ ansible.builtin.set_fact:
+ _found_file: "{{ lookup('ansible.builtin.first_found', findme) }}"
+ vars:
+ findme:
+ - /path/to/foo.txt
+ - bar.txt # will be looked for in the files/ dir relative to the role and/or play
+ - /path/to/biz.txt
+
+- name: Set _found_file to the first existing file, or an empty list if no files found
+ ansible.builtin.set_fact:
+ _found_file: "{{ lookup('ansible.builtin.first_found', files, paths=['/extra/path'], skip=True) }}"
+ vars:
+ files:
+ - /path/to/foo.txt
+ - /path/to/bar.txt
+
+- name: Include tasks only if one of the files exist, otherwise skip the task
+ ansible.builtin.include_tasks:
+ file: "{{ item }}"
+ with_first_found:
+ - files:
+ - path/tasks.yaml
+ - path/other_tasks.yaml
+ skip: True
+
+- name: Include tasks only if one of the files exists, otherwise skip
+ ansible.builtin.include_tasks: '{{ tasks_file }}'
+ when: tasks_file != ""
+ vars:
+ tasks_file: "{{ lookup('ansible.builtin.first_found', files=['tasks.yaml', 'other_tasks.yaml'], errors='ignore') }}"
+
+- name: |
+ copy first existing file found to /some/file,
+ looking in relative directories from where the task is defined and
+ including any play objects that contain it
+ ansible.builtin.copy:
+ src: "{{ lookup('ansible.builtin.first_found', findme) }}"
+ dest: /some/file
+ vars:
+ findme:
+ - foo
+ - "{{ inventory_hostname }}"
+ - bar
+
+- name: same copy but specific paths
+ ansible.builtin.copy:
+ src: "{{ lookup('ansible.builtin.first_found', params) }}"
+ dest: /some/file
+ vars:
+ params:
+ files:
+ - foo
+ - "{{ inventory_hostname }}"
+ - bar
+ paths:
+ - /tmp/production
+ - /tmp/staging
+
+- name: INTERFACES | Create Ansible header for /etc/network/interfaces
+ ansible.builtin.template:
+ src: "{{ lookup('ansible.builtin.first_found', findme)}}"
+ dest: "/etc/foo.conf"
+ vars:
+ findme:
+ - "{{ ansible_virtualization_type }}_foo.conf"
+ - "default_foo.conf"
+
+- name: read vars from first file found, use 'vars/' relative subdir
+ ansible.builtin.include_vars: "{{lookup('ansible.builtin.first_found', params)}}"
+ vars:
+ params:
+ files:
+ - '{{ ansible_distribution }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - default.yml
+ paths:
+ - 'vars'
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - path to file found
+ type: list
+ elements: path
+"""
+import os
+import re
+
+from collections.abc import Mapping, Sequence
+
+from jinja2.exceptions import UndefinedError
+
+from ansible.errors import AnsibleLookupError, AnsibleUndefinedVariable
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+
+
+def _split_on(terms, splitters=','):
+ termlist = []
+ if isinstance(terms, string_types):
+ termlist = re.split(r'[%s]' % ''.join(map(re.escape, splitters)), terms)
+ else:
+ # added since options will already listify
+ for t in terms:
+ termlist.extend(_split_on(t, splitters))
+ return termlist
+
+
+class LookupModule(LookupBase):
+
+ def _process_terms(self, terms, variables, kwargs):
+
+ total_search = []
+ skip = False
+
+ # can use a dict instead of list item to pass inline config
+ for term in terms:
+ if isinstance(term, Mapping):
+ self.set_options(var_options=variables, direct=term)
+ elif isinstance(term, string_types):
+ self.set_options(var_options=variables, direct=kwargs)
+ elif isinstance(term, Sequence):
+ partial, skip = self._process_terms(term, variables, kwargs)
+ total_search.extend(partial)
+ continue
+ else:
+ raise AnsibleLookupError("Invalid term supplied, can handle string, mapping or list of strings but got: %s for %s" % (type(term), term))
+
+ files = self.get_option('files')
+ paths = self.get_option('paths')
+
+ # NOTE: 'skip' is treated as global, but each processed term can reset it
+ skip = self.get_option('skip')
+
+ # magic extra splitting to create lists
+ filelist = _split_on(files, ',;')
+ pathlist = _split_on(paths, ',:;')
+
+ # create search structure
+ if pathlist:
+ for path in pathlist:
+ for fn in filelist:
+ f = os.path.join(path, fn)
+ total_search.append(f)
+ elif filelist:
+ # NOTE: this seems wrong, should be 'extend' as any option/entry can clobber all
+ total_search = filelist
+ else:
+ total_search.append(term)
+
+ return total_search, skip
+
+ def run(self, terms, variables, **kwargs):
+
+ total_search, skip = self._process_terms(terms, variables, kwargs)
+
+ # NOTE: during refactor noticed that the 'using a dict' as term
+ # is designed to only work with 'one' otherwise inconsistencies will appear.
+ # see other notes below.
+
+ # actually search
+ subdir = getattr(self, '_subdir', 'files')
+
+ path = None
+ for fn in total_search:
+
+ try:
+ fn = self._templar.template(fn)
+ except (AnsibleUndefinedVariable, UndefinedError):
+ continue
+
+ # get subdir if set by task executor, default to files otherwise
+ path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True)
+
+ # exit if we find one!
+ if path is not None:
+ return [path]
+
+ # if we get here, no file was found
+ if skip:
+ # NOTE: the global skip won't matter, only the last 'skip' value from a dict term does
+ return []
+ raise AnsibleLookupError("No file was found when using first_found.")
diff --git a/lib/ansible/plugins/lookup/indexed_items.py b/lib/ansible/plugins/lookup/indexed_items.py
new file mode 100644
index 0000000..f63a895
--- /dev/null
+++ b/lib/ansible/plugins/lookup/indexed_items.py
@@ -0,0 +1,52 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: indexed_items
+ author: Michael DeHaan
+ version_added: "1.3"
+ short_description: rewrites lists to return 'indexed items'
+ description:
+ - use this lookup if you want to loop over an array and also get the numeric index of where you are in the array as you go
+ - any list given will be transformed, with each resulting element having its previous position in item.0 and its value in item.1
+ options:
+ _terms:
+ description: list of items
+ required: True
+"""
+
+EXAMPLES = """
+- name: indexed loop demo
+ ansible.builtin.debug:
+ msg: "at array position {{ item.0 }} there is a value {{ item.1 }}"
+ with_indexed_items:
+ - "{{ some_list }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - list with each item.0 giving you the position and item.1 the value
+ type: list
+ elements: list
+"""
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def __init__(self, basedir=None, **kwargs):
+ super(LookupModule, self).__init__(**kwargs)
+ self.basedir = basedir
+
+ def run(self, terms, variables, **kwargs):
+
+ if not isinstance(terms, list):
+ raise AnsibleError("with_indexed_items expects a list")
+
+ items = self._flatten(terms)
+ return list(zip(range(len(items)), items))
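
The zip/range transformation above is just enumerate() materialized as a list of pairs:

    items = ['a', 'b', 'c']
    assert list(zip(range(len(items)), items)) == list(enumerate(items))
    print(list(enumerate(items)))  # [(0, 'a'), (1, 'b'), (2, 'c')]
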
diff --git a/lib/ansible/plugins/lookup/ini.py b/lib/ansible/plugins/lookup/ini.py
new file mode 100644
index 0000000..eea8634
--- /dev/null
+++ b/lib/ansible/plugins/lookup/ini.py
@@ -0,0 +1,204 @@
+# (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: ini
+ author: Yannig Perre (!UNKNOWN) <yannig.perre(at)gmail.com>
+ version_added: "2.0"
+ short_description: read data from an ini file
+ description:
+ - "The ini lookup reads the contents of a file in INI format C(key1=value1).
+ This plugin retrieves the value on the right side after the equal sign C('=') of a given section C([section])."
+ - "You can also read a property file which - in this case - does not contain section."
+ options:
+ _terms:
+ description: The key(s) to look up.
+ required: True
+ type:
+ description: Type of the file. 'properties' refers to the Java properties files.
+ default: 'ini'
+ choices: ['ini', 'properties']
+ file:
+ description: Name of the file to load.
+ default: 'ansible.ini'
+ section:
+ default: global
+ description: Section in which to look up the key.
+ re:
+ default: False
+ type: boolean
+ description: Flag to indicate if the key supplied is a regexp.
+ encoding:
+ default: utf-8
+ description: Text encoding to use.
+ default:
+ description: Return value if the key is not in the ini file.
+ default: ''
+ case_sensitive:
+ description:
+ Whether key names read from C(file) should be case sensitive. This prevents
+ duplicate key errors if keys only differ in case.
+ default: False
+ version_added: '2.12'
+ allow_no_value:
+ description:
+ - Read an ini file that contains keys without values and without an '=' symbol.
+ type: bool
+ default: False
+ aliases: ['allow_none']
+ version_added: '2.12'
+"""
+
+EXAMPLES = """
+- ansible.builtin.debug: msg="User in integration is {{ lookup('ansible.builtin.ini', 'user', section='integration', file='users.ini') }}"
+
+- ansible.builtin.debug: msg="User in production is {{ lookup('ansible.builtin.ini', 'user', section='production', file='users.ini') }}"
+
+- ansible.builtin.debug: msg="user.name is {{ lookup('ansible.builtin.ini', 'user.name', type='properties', file='user.properties') }}"
+
+- ansible.builtin.debug:
+ msg: "{{ item }}"
+ loop: "{{ q('ansible.builtin.ini', '.*', section='section1', file='test.ini', re=True) }}"
+
+- name: Read an ini file with allow_no_value
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.ini', 'user', file='mysql.ini', section='mysqld', allow_no_value=True) }}"
+"""
+
+RETURN = """
+_raw:
+ description:
+ - value(s) of the key(s) in the ini file
+ type: list
+ elements: str
+"""
+
+import configparser
+import os
+import re
+
+from io import StringIO
+from collections import defaultdict
+from collections.abc import MutableSequence
+
+from ansible.errors import AnsibleLookupError, AnsibleOptionsError
+from ansible.module_utils._text import to_text, to_native
+from ansible.plugins.lookup import LookupBase
+
+
+def _parse_params(term, paramvals):
+ '''Safely split parameter term to preserve spaces'''
+
+ # TODO: deprecate this method
+ valid_keys = paramvals.keys()
+ params = defaultdict(lambda: '')
+
+ # TODO: check kv_parser to see if it can handle spaces this same way
+ keys = []
+ thiskey = 'key' # initialize for 'lookup item'
+ for idp, phrase in enumerate(term.split()):
+
+ # update current key if used
+ if '=' in phrase:
+ for k in valid_keys:
+ if ('%s=' % k) in phrase:
+ thiskey = k
+
+ # if first term or key does not exist
+ if idp == 0 or not params[thiskey]:
+ params[thiskey] = phrase
+ keys.append(thiskey)
+ else:
+ # append to existing key
+ params[thiskey] += ' ' + phrase
+
+ # return list of values
+ return [params[x] for x in keys]
+
+
+class LookupModule(LookupBase):
+
+ def get_value(self, key, section, dflt, is_regexp):
+ # Retrieve all values from a section using a regexp
+ if is_regexp:
+ return [v for k, v in self.cp.items(section) if re.match(key, k)]
+ value = None
+ # Retrieve a single value
+ try:
+ value = self.cp.get(section, key)
+ except configparser.NoOptionError:
+ return dflt
+ return value
+
+ def run(self, terms, variables=None, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+ paramvals = self.get_options()
+
+ self.cp = configparser.ConfigParser(allow_no_value=paramvals.get('allow_no_value', paramvals.get('allow_none')))
+ if paramvals['case_sensitive']:
+ self.cp.optionxform = to_native
+
+ ret = []
+ for term in terms:
+
+ key = term
+ # parameters specified?
+ if '=' in term or ' ' in term.strip():
+ self._deprecate_inline_kv()
+ params = _parse_params(term, paramvals)
+ try:
+ updated_key = False
+ for param in params:
+ if '=' in param:
+ name, value = param.split('=')
+ if name not in paramvals:
+ raise AnsibleLookupError('%s is not a valid option.' % name)
+ paramvals[name] = value
+ elif key == term:
+ # only take first, this format never supported multiple keys inline
+ key = param
+ updated_key = True
+ except ValueError as e:
+ # bad params passed
+ raise AnsibleLookupError("Could not use '%s' from '%s': %s" % (param, params, to_native(e)), orig_exc=e)
+ if not updated_key:
+ raise AnsibleOptionsError("No key to lookup was provided as first term with in string inline options: %s" % term)
+ # only passed options in inline string
+
+ # TODO: look to use cache to avoid redoing this for every term if they use same file
+ # Retrieve file path
+ path = self.find_file_in_search_path(variables, 'files', paramvals['file'])
+
+ # Create StringIO later used to parse ini
+ config = StringIO()
+ # Special case for java properties
+ if paramvals['type'] == "properties":
+ config.write(u'[java_properties]\n')
+ paramvals['section'] = 'java_properties'
+
+ # Open file using encoding
+ contents, show_data = self._loader._get_file_contents(path)
+ contents = to_text(contents, errors='surrogate_or_strict', encoding=paramvals['encoding'])
+ config.write(contents)
+ config.seek(0, os.SEEK_SET)
+
+ try:
+ self.cp.read_file(config) # read_file() replaces the readfp() alias deprecated since Python 3.2
+ except configparser.DuplicateOptionError as doe:
+ raise AnsibleLookupError("Duplicate option in '{file}': {error}".format(file=paramvals['file'], error=to_native(doe)))
+
+ try:
+ var = self.get_value(key, paramvals['section'], paramvals['default'], paramvals['re'])
+ except configparser.NoSectionError:
+ raise AnsibleLookupError("No section '{section}' in {file}".format(section=paramvals['section'], file=paramvals['file']))
+ if var is not None:
+ if isinstance(var, MutableSequence):
+ for v in var:
+ ret.append(v)
+ else:
+ ret.append(var)
+ return ret
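
The 'properties' type above works by prepending a synthetic section header, since configparser insists on one. A standalone sketch of that trick:

    import configparser
    import io

    props = 'user.name=alice\nuser.home=/home/alice\n'
    buf = io.StringIO('[java_properties]\n' + props)  # synthetic section, as in run() above

    cp = configparser.ConfigParser()
    cp.read_file(buf)
    print(cp.get('java_properties', 'user.name'))  # alice
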
diff --git a/lib/ansible/plugins/lookup/inventory_hostnames.py b/lib/ansible/plugins/lookup/inventory_hostnames.py
new file mode 100644
index 0000000..4fa1d68
--- /dev/null
+++ b/lib/ansible/plugins/lookup/inventory_hostnames.py
@@ -0,0 +1,53 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2013, Steven Dossett <sdossett@panath.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: inventory_hostnames
+ author:
+ - Michael DeHaan
+ - Steven Dossett (!UNKNOWN) <sdossett@panath.com>
+ version_added: "1.3"
+ short_description: list of inventory hosts matching a host pattern
+ description:
+ - "This lookup understands 'host patterns' as used by the C(hosts:) keyword in plays
+ and can return a list of matching hosts from inventory"
+ notes:
+ - this is only worthwhile for 'hostname patterns'; otherwise it is easier to loop over the group/group_names variables.
+"""
+
+EXAMPLES = """
+- name: show all the hosts matching the pattern, i.e. all but the group www
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_inventory_hostnames:
+ - all:!www
+"""
+
+RETURN = """
+ _hostnames:
+ description: list of hostnames that matched the host pattern in inventory
+ type: list
+"""
+
+from ansible.errors import AnsibleError
+from ansible.inventory.manager import InventoryManager
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ manager = InventoryManager(self._loader, parse=False)
+ for group, hosts in variables['groups'].items():
+ manager.add_group(group)
+ for host in hosts:
+ manager.add_host(host, group=group)
+
+ try:
+ return [h.name for h in manager.get_hosts(pattern=terms)]
+ except AnsibleError:
+ return []
diff --git a/lib/ansible/plugins/lookup/items.py b/lib/ansible/plugins/lookup/items.py
new file mode 100644
index 0000000..162c1e7
--- /dev/null
+++ b/lib/ansible/plugins/lookup/items.py
@@ -0,0 +1,73 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: items
+ author: Michael DeHaan
+ version_added: historical
+ short_description: list of items
+ description:
+ - this lookup returns a list of items given to it; if any of the top-level items is also a list it will flatten it, but it will not recurse
+ notes:
+ - this is the standard lookup used for loops in most examples
+ - check out the 'flattened' lookup for recursive flattening
+ - if you do not want flattening nor any other transformation look at the 'list' lookup.
+ options:
+ _terms:
+ description: list of items
+ required: True
+"""
+
+EXAMPLES = """
+- name: "loop through list"
+ ansible.builtin.debug:
+ msg: "An item: {{ item }}"
+ with_items:
+ - 1
+ - 2
+ - 3
+
+- name: add several users
+ ansible.builtin.user:
+ name: "{{ item }}"
+ groups: "wheel"
+ state: present
+ with_items:
+ - testuser1
+ - testuser2
+
+- name: "loop through list from a variable"
+ ansible.builtin.debug:
+ msg: "An item: {{ item }}"
+ with_items: "{{ somelist }}"
+
+- name: more complex items to add several users
+ ansible.builtin.user:
+ name: "{{ item.name }}"
+ uid: "{{ item.uid }}"
+ groups: "{{ item.groups }}"
+ state: present
+ with_items:
+ - { name: testuser1, uid: 1002, groups: "wheel, staff" }
+ - { name: testuser2, uid: 1003, groups: staff }
+
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - the input list, flattened one level
+ type: list
+"""
+
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, **kwargs):
+
+ return self._flatten(terms)
diff --git a/lib/ansible/plugins/lookup/lines.py b/lib/ansible/plugins/lookup/lines.py
new file mode 100644
index 0000000..7676d01
--- /dev/null
+++ b/lib/ansible/plugins/lookup/lines.py
@@ -0,0 +1,62 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: lines
+ author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
+ version_added: "0.9"
+ short_description: read lines from command
+ description:
+ - Run one or more commands and split the output into lines, returning them as a list
+ options:
+ _terms:
+ description: command(s) to run
+ required: True
+ notes:
+ - Like all lookups, this runs on the Ansible controller and is unaffected by other keywords such as 'become'.
+ If you need to use different permissions, you must change the command or run Ansible as another user.
+ - Alternatively, you can use a shell/command task that runs against localhost and registers the result.
+"""
+
+EXAMPLES = """
+- name: We could read the file directly, but this shows output from command
+ ansible.builtin.debug: msg="{{ item }} is an output line from running cat on /etc/motd"
+ with_lines: cat /etc/motd
+
+- name: More useful example of looping over a command result
+ ansible.builtin.shell: "/usr/bin/frobnicate {{ item }}"
+ with_lines:
+ - "/usr/bin/frobnications_per_host --param {{ inventory_hostname }}"
+"""
+
+RETURN = """
+ _list:
+ description:
+ - lines of stdout from command
+ type: list
+ elements: str
+"""
+
+import subprocess
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_text
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ ret = []
+ for term in terms:
+ p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ (stdout, stderr) = p.communicate()
+ if p.returncode == 0:
+ ret.extend([to_text(l) for l in stdout.splitlines()])
+ else:
+ raise AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
+ return ret
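
For a single term, run() above is roughly equivalent to the following stdlib call (minus Ansible's error type and basedir handling):

    import subprocess

    # shell=True mirrors the plugin; check=True raises on a nonzero exit status
    out = subprocess.run('echo one; echo two', shell=True, capture_output=True,
                         text=True, check=True).stdout
    print(out.splitlines())  # ['one', 'two']
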
diff --git a/lib/ansible/plugins/lookup/list.py b/lib/ansible/plugins/lookup/list.py
new file mode 100644
index 0000000..6c553ae
--- /dev/null
+++ b/lib/ansible/plugins/lookup/list.py
@@ -0,0 +1,45 @@
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: list
+ author: Ansible Core Team
+ version_added: "2.0"
+ short_description: simply returns what it is given.
+ description:
+ - this is mostly a noop, to be used as a with_list loop when you don't want the content transformed in any way.
+"""
+
+EXAMPLES = """
+- name: unlike with_items you will get 3 items from this loop, the 2nd one being a list
+ ansible.builtin.debug: var=item
+ with_list:
+ - 1
+ - [2,3]
+ - 4
+"""
+
+RETURN = """
+ _list:
+ description: basically the same as you fed in
+ type: list
+ elements: raw
+"""
+
+from collections.abc import Sequence
+
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, **kwargs):
+ if not isinstance(terms, Sequence):
+ raise AnsibleError("with_list expects a list")
+ return terms
diff --git a/lib/ansible/plugins/lookup/nested.py b/lib/ansible/plugins/lookup/nested.py
new file mode 100644
index 0000000..e768dba
--- /dev/null
+++ b/lib/ansible/plugins/lookup/nested.py
@@ -0,0 +1,85 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: nested
+ version_added: "1.1"
+ short_description: composes a list with nested elements of other lists
+ description:
+ - Takes the input lists and returns a list with elements that are lists composed of the elements of the input lists
+ options:
+ _raw:
+ description:
+ - a set of lists
+ required: True
+"""
+
+EXAMPLES = """
+- name: give users access to multiple databases
+ community.mysql.mysql_user:
+ name: "{{ item[0] }}"
+ priv: "{{ item[1] }}.*:ALL"
+ append_privs: yes
+ password: "foo"
+ with_nested:
+ - [ 'alice', 'bob' ]
+ - [ 'clientdb', 'employeedb', 'providerdb' ]
+# As with the case of 'with_items' above, you can use previously defined variables.:
+
+- name: here, 'users' contains the above list of employees
+ community.mysql.mysql_user:
+ name: "{{ item[0] }}"
+ priv: "{{ item[1] }}.*:ALL"
+ append_privs: yes
+ password: "foo"
+ with_nested:
+ - "{{ users }}"
+ - [ 'clientdb', 'employeedb', 'providerdb' ]
+"""
+
+RETURN = """
+ _list:
+ description:
+ - A list composed of lists pairing the elements of the input lists
+ type: list
+"""
+
+from jinja2.exceptions import UndefinedError
+
+from ansible.errors import AnsibleError, AnsibleUndefinedVariable
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+class LookupModule(LookupBase):
+
+ def _lookup_variables(self, terms, variables):
+ results = []
+ for x in terms:
+ try:
+ intermediate = listify_lookup_plugin_terms(x, templar=self._templar, fail_on_undefined=True)
+ except UndefinedError as e:
+ raise AnsibleUndefinedVariable("One of the nested variables was undefined. The error was: %s" % e)
+ results.append(intermediate)
+ return results
+
+ def run(self, terms, variables=None, **kwargs):
+
+ terms = self._lookup_variables(terms, variables)
+
+ my_list = terms[:]
+ my_list.reverse()
+ result = []
+ if len(my_list) == 0:
+ raise AnsibleError("with_nested requires at least one element in the nested list")
+ result = my_list.pop()
+ while len(my_list) > 0:
+ result2 = self._combine(result, my_list.pop())
+ result = result2
+ new_result = []
+ for x in result:
+ new_result.append(self._flatten(x))
+ return new_result
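
For already-resolved flat input lists, the loop above computes the same n-way cartesian product as itertools.product; the plugin's _combine/_flatten dance additionally flattens nested elements:

    from itertools import product

    users = ['alice', 'bob']
    dbs = ['clientdb', 'employeedb']
    print([list(t) for t in product(users, dbs)])
    # [['alice', 'clientdb'], ['alice', 'employeedb'], ['bob', 'clientdb'], ['bob', 'employeedb']]
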
diff --git a/lib/ansible/plugins/lookup/password.py b/lib/ansible/plugins/lookup/password.py
new file mode 100644
index 0000000..06ea8b3
--- /dev/null
+++ b/lib/ansible/plugins/lookup/password.py
@@ -0,0 +1,389 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2013, Javier Candeira <javier@candeira.com>
+# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: password
+ version_added: "1.1"
+ author:
+ - Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
+ - Javier Candeira (!UNKNOWN) <javier@candeira.com>
+ - Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
+ short_description: retrieve or generate a random password, stored in a file
+ description:
+ - Generates a random plaintext password and stores it in a file at a given filepath.
+ - If the file exists previously, it will retrieve its contents, behaving just like with_file.
+ - 'Usage of variables like C("{{ inventory_hostname }}") in the filepath can be used to set up random passwords per host,
+ which simplifies password management in C("host_vars") variables.'
+ - A special case is using /dev/null as a path. The password lookup will generate a new random password each time,
+ but will not write it to /dev/null. This can be used when you need a password without storing it on the controller.
+ options:
+ _terms:
+ description:
+ - path to the file that stores/will store the passwords
+ required: True
+ encrypt:
+ description:
+ - Which hash scheme to use to encrypt the returned password; should be one of the hash schemes from C(passlib.hash), such as C(md5_crypt), C(bcrypt), C(sha256_crypt) or C(sha512_crypt).
+ - If not provided, the password will be returned in plain text.
+ - Note that the password is always stored as plain text; only the returned password is encrypted.
+ - Encrypt also forces saving the salt value for idempotence.
+ - Note that before 2.6 this option was incorrectly labeled as a boolean for a long time.
+ ident:
+ description:
+ - Specify version of Bcrypt algorithm to be used while using C(encrypt) as C(bcrypt).
+ - The parameter is only available for C(bcrypt) - U(https://passlib.readthedocs.io/en/stable/lib/passlib.hash.bcrypt.html#passlib.hash.bcrypt).
+ - Other hash types will simply ignore this parameter.
+ - 'Valid values for this parameter are: C(2), C(2a), C(2y), C(2b).'
+ type: string
+ version_added: "2.12"
+ chars:
+ version_added: "1.4"
+ description:
+ - A list of names that compose a custom character set in the generated passwords.
+ - 'By default generated passwords contain a random mix of upper and lowercase ASCII letters, the numbers 0-9, and punctuation (". , : - _").'
+ - "They can be either parts of Python's string module attributes or represented literally ( :, -)."
+ - "Though string modules can vary by Python version, valid values for both major releases include:
+ 'ascii_lowercase', 'ascii_uppercase', 'digits', 'hexdigits', 'octdigits', 'printable', 'punctuation' and 'whitespace'."
+ - Be aware that Python's 'hexdigits' includes lower and upper case versions of a-f, so it is not a good choice as it doubles
+ the chances of those values for systems that won't distinguish case, distorting the expected entropy.
+ - "when using a comma separated string, to enter comma use two commas ',,' somewhere - preferably at the end.
+ Quotes and double quotes are not supported."
+ type: list
+ elements: str
+ default: ['ascii_letters', 'digits', ".,:-_"]
+ length:
+ description: The length of the generated password.
+ default: 20
+ type: integer
+ seed:
+ version_added: "2.12"
+ description:
+ - A seed to initialize the random number generator.
+ - Identical seeds will yield identical passwords.
+ - Use this for random-but-idempotent password generation.
+ type: str
+ notes:
+ - A great alternative to the password lookup plugin,
+ if you don't need to generate random passwords on a per-host basis,
+ would be to use Vault in playbooks.
+ Read the documentation there and consider using it first,
+ it will be more desirable for most applications.
+ - If the file already exists, no data will be written to it.
+ If the file has contents, those contents will be read in as the password.
+ Empty files cause the password to return as an empty string.
+ - 'As with all lookups, this runs on the Ansible host as the user running the playbook, and "become" does not apply,
+ the target file must be readable by the playbook user, or, if it does not exist,
+ the playbook user must have sufficient privileges to create it.
+ (So, for example, attempts to write into areas such as /etc will fail unless the entire playbook is being run as root).'
+"""
+
+EXAMPLES = """
+- name: create a mysql user with a random password
+ community.mysql.mysql_user:
+ name: "{{ client }}"
+ password: "{{ lookup('ansible.builtin.password', 'credentials/' + client + '/' + tier + '/' + role + '/mysqlpassword length=15') }}"
+ priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
+
+- name: create a mysql user with a random password using only ascii letters
+ community.mysql.mysql_user:
+ name: "{{ client }}"
+ password: "{{ lookup('ansible.builtin.password', '/tmp/passwordfile chars=ascii_letters') }}"
+ priv: '{{ client }}_{{ tier }}_{{ role }}.*:ALL'
+
+- name: create a mysql user with an 8 character random password using only digits
+ community.mysql.mysql_user:
+ name: "{{ client }}"
+ password: "{{ lookup('ansible.builtin.password', '/tmp/passwordfile length=8 chars=digits') }}"
+ priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
+
+- name: create a mysql user with a random password using many different char sets
+ community.mysql.mysql_user:
+ name: "{{ client }}"
+ password: "{{ lookup('ansible.builtin.password', '/tmp/passwordfile chars=ascii_letters,digits,punctuation') }}"
+ priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
+
+- name: create lowercase 8 character name for Kubernetes pod name
+ ansible.builtin.set_fact:
+ random_pod_name: "web-{{ lookup('ansible.builtin.password', '/dev/null chars=ascii_lowercase,digits length=8') }}"
+
+- name: create random but idempotent password
+ ansible.builtin.set_fact:
+ password: "{{ lookup('ansible.builtin.password', '/dev/null', seed=inventory_hostname) }}"
+"""
+
+RETURN = """
+_raw:
+ description:
+ - a password
+ type: list
+ elements: str
+"""
+
+import os
+import string
+import time
+import hashlib
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import string_types
+from ansible.parsing.splitter import parse_kv
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.encrypt import BaseHash, do_encrypt, random_password, random_salt
+from ansible.utils.path import makedirs_safe
+
+
+VALID_PARAMS = frozenset(('length', 'encrypt', 'chars', 'ident', 'seed'))
+
+
+def _read_password_file(b_path):
+ """Read the contents of a password file and return it
+ :arg b_path: A byte string containing the path to the password file
+ :returns: a text string containing the contents of the password file or
+ None if no password file was present.
+ """
+ content = None
+
+ if os.path.exists(b_path):
+ with open(b_path, 'rb') as f:
+ b_content = f.read().rstrip()
+ content = to_text(b_content, errors='surrogate_or_strict')
+
+ return content
+
+
+def _gen_candidate_chars(characters):
+ '''Generate a string containing all valid chars as defined by ``characters``
+
+ :arg characters: A list of character specs. The character specs are
+ shorthand names for sets of characters like 'digits', 'ascii_letters',
+ or 'punctuation' or a string to be included verbatim.
+
+ The values of each char spec can be:
+
+ * a name of an attribute in the 'string' module ('digits' for example).
+ The value of the attribute will be added to the candidate chars.
+ * a string of characters. If the string isn't an attribute in 'string'
+ module, the string will be directly added to the candidate chars.
+
+ For example::
+
+ characters=['digits', '?|']
+
+ will match ``string.digits`` and add all ascii digits. ``'?|'`` will add
+ the question mark and pipe characters directly. Return will be the string::
+
+ u'0123456789?|'
+ '''
+ chars = []
+ for chars_spec in characters:
+ # getattr from string expands things like "ascii_letters" and "digits"
+ # into a set of characters.
+ chars.append(to_text(getattr(string, to_native(chars_spec), chars_spec), errors='strict'))
+ chars = u''.join(chars).replace(u'"', u'').replace(u"'", u'')
+ return chars
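+
+ # Illustrative expansion (hypothetical input): _gen_candidate_chars(['ascii_uppercase', '?|'])
+ # resolves 'ascii_uppercase' via getattr(string, ...) to 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
+ # keeps '?|' verbatim since it is not a string-module attribute, and returns
+ # u'ABCDEFGHIJKLMNOPQRSTUVWXYZ?|' (any single or double quotes would be stripped).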
+
+
+def _parse_content(content):
+ '''parse our password data format into password and salt
+
+ :arg content: The data read from the file
+ :returns: password and salt
+ '''
+ password = content
+ salt = None
+
+ salt_slug = u' salt='
+ try:
+ sep = content.rindex(salt_slug)
+ except ValueError:
+ # No salt
+ pass
+ else:
+ salt = password[sep + len(salt_slug):]
+ password = content[:sep]
+
+ return password, salt
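+
+ # Illustrative round trip (hypothetical file contents):
+ #   _parse_content(u'hunter2 salt=Xyz09') -> (u'hunter2', u'Xyz09')
+ #   _parse_content(u'hunter2')            -> (u'hunter2', None)
+ # rindex() means only the last ' salt=' marker splits the line, so the password
+ # itself may safely contain that substring earlier in the string.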
+
+
+def _format_content(password, salt, encrypt=None, ident=None):
+ """Format the password and salt for saving
+ :arg password: the plaintext password to save
+ :arg salt: the salt to use when encrypting a password
+ :arg encrypt: Which method the user requests that this password is encrypted.
+ Note that the password is saved in clear. Encrypt just tells us if we
+ must save the salt value for idempotence. Defaults to None.
+ :arg ident: Which version of BCrypt algorithm to be used.
+ Valid only if value of encrypt is bcrypt.
+ Defaults to None.
+ :returns: a text string containing the formatted information
+
+ .. warning:: Passwords are saved in clear. This is because the playbooks
+ expect to get cleartext passwords from this lookup.
+ """
+ if not encrypt and not salt:
+ return password
+
+ # At this point, the calling code should have assured us that there is a salt value.
+ if not salt:
+ raise AnsibleAssertionError('_format_content was called with encryption requested but no salt value')
+
+ if ident:
+ return u'%s salt=%s ident=%s' % (password, salt, ident)
+ return u'%s salt=%s' % (password, salt)
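+
+ # Illustrative outputs (hypothetical values):
+ #   _format_content(u'hunter2', None)                                   -> u'hunter2'
+ #   _format_content(u'hunter2', u'Xyz09', encrypt='sha256_crypt')       -> u'hunter2 salt=Xyz09'
+ #   _format_content(u'hunter2', u'Xyz09', encrypt='bcrypt', ident='2b') -> u'hunter2 salt=Xyz09 ident=2b'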
+
+
+def _write_password_file(b_path, content):
+ b_pathdir = os.path.dirname(b_path)
+ makedirs_safe(b_pathdir, mode=0o700)
+
+ with open(b_path, 'wb') as f:
+ os.chmod(b_path, 0o600)
+ b_content = to_bytes(content, errors='surrogate_or_strict') + b'\n'
+ f.write(b_content)
+
+
+def _get_lock(b_path):
+ """Get the lock for writing password file."""
+ first_process = False
+ b_pathdir = os.path.dirname(b_path)
+ lockfile_name = to_bytes("%s.ansible_lockfile" % hashlib.sha1(b_path).hexdigest())
+ lockfile = os.path.join(b_pathdir, lockfile_name)
+ if not os.path.exists(lockfile) and b_path != to_bytes('/dev/null'):
+ try:
+ makedirs_safe(b_pathdir, mode=0o700)
+ fd = os.open(lockfile, os.O_CREAT | os.O_EXCL)
+ os.close(fd)
+ first_process = True
+ except OSError as e:
+ if e.strerror != 'File exists':
+ raise
+
+ counter = 0
+ # if the lock is held by another process, wait until it is released
+ while os.path.exists(lockfile) and not first_process:
+ time.sleep(2 ** counter)
+ if counter >= 2:
+ raise AnsibleError("Password lookup cannot get the lock in 7 seconds, abort..."
+ "This may caused by un-removed lockfile"
+ "you can manually remove it from controller machine at %s and try again" % lockfile)
+ counter += 1
+ return first_process, lockfile
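+
+ # The retry loop above backs off exponentially: it sleeps 1, 2 and then 4
+ # seconds (2**0 + 2**1 + 2**2 = 7, hence the "7 seconds" in the message)
+ # before giving up and asking the user to remove a stale lockfile by hand.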
+
+
+def _release_lock(lockfile):
+ """Release the lock so other processes can read the password file."""
+ if os.path.exists(lockfile):
+ os.remove(lockfile)
+
+
+class LookupModule(LookupBase):
+
+ def _parse_parameters(self, term):
+ """Hacky parsing of params
+
+ See https://github.com/ansible/ansible-modules-core/issues/1968#issuecomment-136842156
+ and the first_found lookup for how we want to fix this later
+ """
+ first_split = term.split(' ', 1)
+ if len(first_split) <= 1:
+ # Only a single argument given, therefore it's a path
+ relpath = term
+ params = dict()
+ else:
+ relpath = first_split[0]
+ params = parse_kv(first_split[1])
+ if '_raw_params' in params:
+ # Spaces in the path?
+ relpath = u' '.join((relpath, params['_raw_params']))
+ del params['_raw_params']
+
+ # Check that we parsed the params correctly
+ if not term.startswith(relpath):
+ # Likely, the user had a non parameter following a parameter.
+ # Reject this as a user typo
+ raise AnsibleError('Unrecognized value after key=value parameters given to password lookup')
+ # No _raw_params means we already found the complete path when
+ # we split it initially
+
+ # Check for invalid parameters. Probably a user typo
+ invalid_params = frozenset(params.keys()).difference(VALID_PARAMS)
+ if invalid_params:
+ raise AnsibleError('Unrecognized parameter(s) given to password lookup: %s' % ', '.join(invalid_params))
+
+ # Set defaults
+ params['length'] = int(params.get('length', self.get_option('length')))
+ params['encrypt'] = params.get('encrypt', self.get_option('encrypt'))
+ params['ident'] = params.get('ident', self.get_option('ident'))
+ params['seed'] = params.get('seed', self.get_option('seed'))
+
+ params['chars'] = params.get('chars', self.get_option('chars'))
+ if params['chars'] and isinstance(params['chars'], string_types):
+ tmp_chars = []
+ if u',,' in params['chars']:
+ tmp_chars.append(u',')
+ tmp_chars.extend(c for c in params['chars'].replace(u',,', u',').split(u',') if c)
+ params['chars'] = tmp_chars
+
+ return relpath, params
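+
+ # Illustrative parse (hypothetical term):
+ #   _parse_parameters(u'credentials/db length=8 chars=digits,,')
+ # returns (u'credentials/db', {'length': 8, 'chars': [u',', u'digits'], ...})
+ # with 'encrypt', 'ident' and 'seed' falling back to their configured defaults.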
+
+ def run(self, terms, variables, **kwargs):
+ ret = []
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ for term in terms:
+ relpath, params = self._parse_parameters(term)
+ path = self._loader.path_dwim(relpath)
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ chars = _gen_candidate_chars(params['chars'])
+
+ changed = None
+ # make sure only one process finishes all the job first
+ first_process, lockfile = _get_lock(b_path)
+
+ content = _read_password_file(b_path)
+
+ if content is None or b_path == to_bytes('/dev/null'):
+ plaintext_password = random_password(params['length'], chars, params['seed'])
+ salt = None
+ changed = True
+ else:
+ plaintext_password, salt = _parse_content(content)
+
+ encrypt = params['encrypt']
+ if encrypt and not salt:
+ changed = True
+ try:
+ salt = random_salt(BaseHash.algorithms[encrypt].salt_size)
+ except KeyError:
+ salt = random_salt()
+
+ ident = params['ident']
+ if encrypt and not ident:
+ changed = True
+ try:
+ ident = BaseHash.algorithms[encrypt].implicit_ident
+ except KeyError:
+ ident = None
+
+ if changed and b_path != to_bytes('/dev/null'):
+ content = _format_content(plaintext_password, salt, encrypt=encrypt, ident=ident)
+ _write_password_file(b_path, content)
+
+ if first_process:
+ # let other processes continue
+ _release_lock(lockfile)
+
+ if encrypt:
+ password = do_encrypt(plaintext_password, encrypt, salt=salt, ident=ident)
+ ret.append(password)
+ else:
+ ret.append(plaintext_password)
+
+ return ret
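+
+ # End-to-end sketch (hypothetical term): 'credentials/foo length=8' first reads
+ # credentials/foo; if the file is missing, an 8-character password is generated,
+ # written back (unless the path is /dev/null), and returned in plain text
+ # because no encrypt scheme was requested.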
diff --git a/lib/ansible/plugins/lookup/pipe.py b/lib/ansible/plugins/lookup/pipe.py
new file mode 100644
index 0000000..54df3fc
--- /dev/null
+++ b/lib/ansible/plugins/lookup/pipe.py
@@ -0,0 +1,76 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+ name: pipe
+ author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
+ version_added: "0.9"
+ short_description: read output from a command
+ description:
+ - Run a command and return the output.
+ options:
+ _terms:
+ description: command(s) to run.
+ required: True
+ notes:
+ - Like all lookups this runs on the Ansible controller and is unaffected by other keywords, such as become,
+ so if you need different permissions you must change the command or run Ansible as another user.
+ - Alternatively you can use a shell/command task that runs against localhost and registers the result.
+ - Pipe lookup internally invokes Popen with shell=True (this is required and intentional).
+ This type of invocation is considered a security issue if appropriate care is not taken to sanitize any user provided or variable input.
+ It is strongly recommended to pass user input or variable input via quote filter before using with pipe lookup.
+ See example section for this.
+ Read more about this L(Bandit B602 docs,https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html)
+"""
+
+EXAMPLES = r"""
+- name: raw result of running date command
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.pipe', 'date') }}"
+
+- name: Always use quote filter to make sure your variables are safe to use with shell
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.pipe', 'getent passwd ' + myuser | quote ) }}"
+"""
+
+RETURN = r"""
+ _string:
+ description:
+ - stdout from command
+ type: list
+ elements: str
+"""
+
+import subprocess
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ ret = []
+ for term in terms:
+ '''
+ https://docs.python.org/3/library/subprocess.html#popen-constructor
+
+ The shell argument (which defaults to False) specifies whether to use the
+ shell as the program to execute. If shell is True, it is recommended to pass
+ args as a string rather than as a sequence
+
+ https://github.com/ansible/ansible/issues/6550
+ '''
+ term = str(term)
+
+ p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ (stdout, stderr) = p.communicate()
+ if p.returncode == 0:
+ ret.append(stdout.decode("utf-8").rstrip())
+ else:
+ raise AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
+ return ret
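+
+ # Illustrative call (hypothetical command): lookup('pipe', 'echo hello') runs
+ # the string through the shell and returns 'hello', i.e. stdout with trailing
+ # whitespace stripped; a non-zero exit status raises AnsibleError instead.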
diff --git a/lib/ansible/plugins/lookup/random_choice.py b/lib/ansible/plugins/lookup/random_choice.py
new file mode 100644
index 0000000..9f8a6ae
--- /dev/null
+++ b/lib/ansible/plugins/lookup/random_choice.py
@@ -0,0 +1,53 @@
+# (c) 2013, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: random_choice
+ author: Michael DeHaan
+ version_added: "1.1"
+ short_description: return random element from list
+ description:
+ - The 'random_choice' feature can be used to pick something at random. While it's not a load balancer (there are modules for those),
+ it can somewhat be used as a poor man's load balancer in a MacGyver-like situation.
+ - At a more basic level, it can be used to add chaos and excitement to otherwise predictable automation environments.
+"""
+
+EXAMPLES = """
+- name: Magic 8 ball for MUDs
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_random_choice:
+ - "go through the door"
+ - "drink from the goblet"
+ - "press the red button"
+ - "do nothing"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - random item
+ type: raw
+"""
+import random
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, inject=None, **kwargs):
+
+ ret = terms
+ if terms:
+ try:
+ ret = [random.choice(terms)]
+ except Exception as e:
+ raise AnsibleError("Unable to choose random term: %s" % to_native(e))
+
+ return ret
diff --git a/lib/ansible/plugins/lookup/sequence.py b/lib/ansible/plugins/lookup/sequence.py
new file mode 100644
index 0000000..8a000c5
--- /dev/null
+++ b/lib/ansible/plugins/lookup/sequence.py
@@ -0,0 +1,268 @@
+# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: sequence
+ author: Jayson Vantuyl (!UNKNOWN) <jayson@aggressive.ly>
+ version_added: "1.0"
+ short_description: generate a list based on a number sequence
+ description:
+ - generates a sequence of items. You can specify a start value, an end value, an optional "stride" value that specifies the number of steps
+ to increment the sequence, and an optional printf-style format string.
+ - 'Arguments can be specified as key=value pair strings, or in the shortcut form [start-]end[/stride][:format].'
+ - 'Numerical values can be specified in decimal, hexadecimal (0x3f8) or octal (0600).'
+ - Starting at version 1.9.2, negative strides are allowed.
+ - Generated items are strings. Use Jinja2 filters to convert items to preferred type, e.g. C({{ 1 + item|int }}).
+ - See also Jinja2 C(range) filter as an alternative.
+ options:
+ start:
+ description: number at which to start the sequence
+ default: 0
+ type: integer
+ end:
+ description: number at which to end the sequence; don't use this with count
+ type: integer
+ default: 0
+ count:
+ description: number of elements in the sequence; this is not to be used with end
+ type: integer
+ default: 0
+ stride:
+ description: increments between sequence numbers; the default is 1 unless the end is less than the start, in which case it is -1.
+ type: integer
+ format:
+ description: printf-style format string used to render each generated number
+"""
+
+EXAMPLES = """
+- name: create some test users
+ ansible.builtin.user:
+ name: "{{ item }}"
+ state: present
+ groups: "evens"
+ with_sequence: start=0 end=32 format=testuser%02x
+
+- name: create a series of directories with even numbers for some reason
+ ansible.builtin.file:
+ dest: "/var/stuff/{{ item }}"
+ state: directory
+ with_sequence: start=4 end=16 stride=2
+
+- name: a simpler way to use the sequence plugin to create 4 groups
+ ansible.builtin.group:
+ name: "group{{ item }}"
+ state: present
+ with_sequence: count=4
+
+- name: the final countdown
+ ansible.builtin.debug:
+ msg: "{{item}} seconds to detonation"
+ with_sequence: start=10 end=0 stride=-1
+
+- name: Use of variable
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_sequence: start=1 end="{{ end_at }}"
+ vars:
+ - end_at: 10
+"""
+
+RETURN = """
+ _list:
+ description:
+ - A list containing generated sequence of items
+ type: list
+ elements: str
+"""
+
+from re import compile as re_compile, IGNORECASE
+
+from ansible.errors import AnsibleError
+from ansible.parsing.splitter import parse_kv
+from ansible.plugins.lookup import LookupBase
+
+
+# shortcut format
+NUM = "(0?x?[0-9a-f]+)"
+SHORTCUT = re_compile(
+ "^(" + # Group 0
+ NUM + # Group 1: Start
+ "-)?" +
+ NUM + # Group 2: End
+ "(/" + # Group 3
+ NUM + # Group 4: Stride
+ ")?" +
+ "(:(.+))?$", # Group 5, Group 6: Format String
+ IGNORECASE
+)
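+
+# Illustrative matches (hypothetical terms):
+#   "5-8"        -> start="5", end="8"
+#   "2-10/2"     -> start="2", end="10", stride="2"
+#   "4:host%02d" -> end="4", format="host%02d"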
+
+
+class LookupModule(LookupBase):
+ """
+ sequence lookup module
+
+ Used to generate some sequence of items. Takes arguments in two forms.
+
+ The simple / shortcut form is:
+
+ [start-]end[/stride][:format]
+
+ As indicated by the brackets: start, stride, and format string are all
+ optional. The format string is in the style of printf. This can be used
+ to pad with zeros, format in hexadecimal, etc. All of the numerical values
+ can be specified in octal (e.g. 0664) or hexadecimal (e.g. 0x3f8).
+ Negative numbers are not supported.
+
+ Some examples:
+
+ 5 -> ["1","2","3","4","5"]
+ 5-8 -> ["5", "6", "7", "8"]
+ 2-10/2 -> ["2", "4", "6", "8", "10"]
+ 4:host%02d -> ["host01","host02","host03","host04"]
+
+ The standard Ansible key-value form is accepted as well. For example:
+
+ start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0b"]
+
+ This format takes an alternate form of "end" called "count", which counts
+ some number from the starting value. For example:
+
+ count=5 -> ["1", "2", "3", "4", "5"]
+ start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
+ start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
+ start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
+
+ The count option is mostly useful for avoiding off-by-one errors and errors
+ calculating the number of entries in a sequence when a stride is specified.
+ """
+
+ def reset(self):
+ """set sensible defaults"""
+ self.start = 1
+ self.count = None
+ self.end = None
+ self.stride = 1
+ self.format = "%d"
+
+ def parse_kv_args(self, args):
+ """parse key-value style arguments"""
+ for arg in ["start", "end", "count", "stride"]:
+ try:
+ arg_raw = args.pop(arg, None)
+ if arg_raw is None:
+ continue
+ arg_cooked = int(arg_raw, 0)
+ setattr(self, arg, arg_cooked)
+ except ValueError:
+ raise AnsibleError(
+ "can't parse %s=%s as integer"
+ % (arg, arg_raw)
+ )
+ if 'format' in args:
+ self.format = args.pop("format")
+ if args:
+ raise AnsibleError(
+ "unrecognized arguments to with_sequence: %s"
+ % list(args.keys())
+ )
+
+ def parse_simple_args(self, term):
+ """parse the shortcut forms, return True/False"""
+ match = SHORTCUT.match(term)
+ if not match:
+ return False
+
+ _, start, end, _, stride, _, format = match.groups()
+
+ if start is not None:
+ try:
+ start = int(start, 0)
+ except ValueError:
+ raise AnsibleError("can't parse start=%s as integer" % start)
+ if end is not None:
+ try:
+ end = int(end, 0)
+ except ValueError:
+ raise AnsibleError("can't parse end=%s as integer" % end)
+ if stride is not None:
+ try:
+ stride = int(stride, 0)
+ except ValueError:
+ raise AnsibleError("can't parse stride=%s as integer" % stride)
+
+ if start is not None:
+ self.start = start
+ if end is not None:
+ self.end = end
+ if stride is not None:
+ self.stride = stride
+ if format is not None:
+ self.format = format
+
+ return True
+
+ def sanity_check(self):
+ if self.count is None and self.end is None:
+ raise AnsibleError("must specify count or end in with_sequence")
+ elif self.count is not None and self.end is not None:
+ raise AnsibleError("can't specify both count and end in with_sequence")
+ elif self.count is not None:
+ # convert count to end
+ if self.count != 0:
+ self.end = self.start + self.count * self.stride - 1
+ else:
+ self.start = 0
+ self.end = 0
+ self.stride = 0
+ del self.count
+ if self.stride > 0 and self.end < self.start:
+ raise AnsibleError("to count backwards make stride negative")
+ if self.stride < 0 and self.end > self.start:
+ raise AnsibleError("to count forward don't make stride negative")
+ if self.format.count('%') != 1:
+ raise AnsibleError("bad formatting string: %s" % self.format)
+
+ def generate_sequence(self):
+ if self.stride >= 0:
+ adjust = 1
+ else:
+ adjust = -1
+ numbers = range(self.start, self.end + adjust, self.stride)
+
+ for i in numbers:
+ try:
+ formatted = self.format % i
+ yield formatted
+ except (ValueError, TypeError):
+ raise AnsibleError(
+ "problem formatting %r with %r" % (i, self.format)
+ )
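+
+ # Illustrative run (hypothetical values): start=10, end=0, stride=-1 walks
+ # range(10, -1, -1), since adjust is -1 for negative strides, and yields
+ # "10", "9", ... "0" with self.format applied to each number.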
+
+ def run(self, terms, variables, **kwargs):
+ results = []
+
+ for term in terms:
+ try:
+ self.reset() # clear out things for this iteration
+ try:
+ if not self.parse_simple_args(term):
+ self.parse_kv_args(parse_kv(term))
+ except AnsibleError:
+ raise
+ except Exception as e:
+ raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))
+
+ self.sanity_check()
+ if self.stride != 0:
+ results.extend(self.generate_sequence())
+ except AnsibleError:
+ raise
+ except Exception as e:
+ raise AnsibleError(
+ "unknown error generating sequence: %s" % e
+ )
+
+ return results
diff --git a/lib/ansible/plugins/lookup/subelements.py b/lib/ansible/plugins/lookup/subelements.py
new file mode 100644
index 0000000..9b1af8b
--- /dev/null
+++ b/lib/ansible/plugins/lookup/subelements.py
@@ -0,0 +1,169 @@
+# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: subelements
+ author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
+ version_added: "1.4"
+ short_description: traverse nested key from a list of dictionaries
+ description:
+ - Subelements walks a list of hashes (aka dictionaries) and then traverses a list with a given (nested sub-)key inside of those records.
+ options:
+ _terms:
+ description: tuple of list of dictionaries and dictionary key to extract
+ required: True
+ skip_missing:
+ default: False
+ description:
+ - This optional flag is passed to the lookup in a dictionary as the third term. See the Examples section for more information.
+ - If set to C(True), the lookup plugin will skip the list items that do not contain the given subkey.
+ - If set to C(False), the plugin will yield an error and complain about the missing subkey.
+"""
+
+EXAMPLES = """
+- name: show var structure as it is needed for example to make sense
+ hosts: all
+ vars:
+ users:
+ - name: alice
+ authorized:
+ - /tmp/alice/onekey.pub
+ - /tmp/alice/twokey.pub
+ mysql:
+ password: mysql-password
+ hosts:
+ - "%"
+ - "127.0.0.1"
+ - "::1"
+ - "localhost"
+ privs:
+ - "*.*:SELECT"
+ - "DB1.*:ALL"
+ groups:
+ - wheel
+ - name: bob
+ authorized:
+ - /tmp/bob/id_rsa.pub
+ mysql:
+ password: other-mysql-password
+ hosts:
+ - "db1"
+ privs:
+ - "*.*:SELECT"
+ - "DB2.*:ALL"
+ tasks:
+ - name: Set authorized ssh key, extracting just that data from 'users'
+ ansible.posix.authorized_key:
+ user: "{{ item.0.name }}"
+ key: "{{ lookup('file', item.1) }}"
+ with_subelements:
+ - "{{ users }}"
+ - authorized
+
+ - name: Setup MySQL users, given the mysql hosts and privs subkey lists
+ community.mysql.mysql_user:
+ name: "{{ item.0.name }}"
+ password: "{{ item.0.mysql.password }}"
+ host: "{{ item.1 }}"
+ priv: "{{ item.0.mysql.privs | join('/') }}"
+ with_subelements:
+ - "{{ users }}"
+ - mysql.hosts
+
+ - name: list groups for users that have them, don't error if groups key is missing
+ ansible.builtin.debug: var=item
+ loop: "{{ q('ansible.builtin.subelements', users, 'groups', {'skip_missing': True}) }}"
+"""
+
+RETURN = """
+_list:
+ description: list of subelements extracted
+"""
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+FLAGS = ('skip_missing',)
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ def _raise_terms_error(msg=""):
+ raise AnsibleError(
+ "subelements lookup expects a list of two or three items, " + msg)
+
+ terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar)
+
+ # check lookup terms - check number of terms
+ if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
+ _raise_terms_error()
+
+ # first term should be a list (or dict), second a string holding the subkey
+ if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], string_types):
+ _raise_terms_error("first a dict or a list, second a string pointing to the subkey")
+ subelements = terms[1].split(".")
+
+ if isinstance(terms[0], dict): # convert to list:
+ if terms[0].get('skipped', False) is not False:
+ # the registered result was completely skipped
+ return []
+ elementlist = []
+ for key in terms[0]:
+ elementlist.append(terms[0][key])
+ else:
+ elementlist = terms[0]
+
+ # check for optional flags in third term
+ flags = {}
+ if len(terms) == 3:
+ flags = terms[2]
+ if not isinstance(flags, dict) and not all(isinstance(key, string_types) and key in FLAGS for key in flags):
+ _raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)
+
+ # build_items
+ ret = []
+ for item0 in elementlist:
+ if not isinstance(item0, dict):
+ raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
+ if item0.get('skipped', False) is not False:
+ # this particular item is to be skipped
+ continue
+
+ skip_missing = boolean(flags.get('skip_missing', False), strict=False)
+ subvalue = item0
+ lastsubkey = False
+ sublist = []
+ for subkey in subelements:
+ if subkey == subelements[-1]:
+ lastsubkey = True
+ if subkey not in subvalue:
+ if skip_missing:
+ continue
+ else:
+ raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
+ if not lastsubkey:
+ if not isinstance(subvalue[subkey], dict):
+ if skip_missing:
+ continue
+ else:
+ raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
+ else:
+ subvalue = subvalue[subkey]
+ else: # lastsubkey
+ if not isinstance(subvalue[subkey], list):
+ raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
+ else:
+ sublist = subvalue.pop(subkey, [])
+ for item1 in sublist:
+ ret.append((item0, item1))
+
+ return ret
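+
+# Illustrative result shape (using the hypothetical 'users' data from the
+# EXAMPLES above with subkey 'mysql.hosts'): ret is a list of (item0, item1)
+# pairs such as ({'name': 'alice', ...}, '%') and ({'name': 'bob', ...}, 'db1'),
+# which with_subelements exposes as item.0 and item.1.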
diff --git a/lib/ansible/plugins/lookup/template.py b/lib/ansible/plugins/lookup/template.py
new file mode 100644
index 0000000..9c575b5
--- /dev/null
+++ b/lib/ansible/plugins/lookup/template.py
@@ -0,0 +1,165 @@
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2012-17, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: template
+ author: Michael DeHaan
+ version_added: "0.9"
+ short_description: retrieve contents of file after templating with Jinja2
+ description:
+ - Returns a list of strings; for each template in the list of templates you pass in, returns a string containing the results of processing that template.
+ options:
+ _terms:
+ description: list of files to template
+ convert_data:
+ type: bool
+ description:
+ - Whether to convert YAML into data. If False, strings that are YAML will be left untouched.
+ - Mutually exclusive with the jinja2_native option.
+ default: true
+ variable_start_string:
+ description: The string marking the beginning of a print statement.
+ default: '{{'
+ version_added: '2.8'
+ type: str
+ variable_end_string:
+ description: The string marking the end of a print statement.
+ default: '}}'
+ version_added: '2.8'
+ type: str
+ jinja2_native:
+ description:
+ - Controls whether to use Jinja2 native types.
+ - It is off by default even if global jinja2_native is True.
+ - Has no effect if global jinja2_native is False.
+ - This offers more flexibility than the template module which does not use Jinja2 native types at all.
+ - Mutually exclusive with the convert_data option.
+ default: False
+ version_added: '2.11'
+ type: bool
+ template_vars:
+ description: A dictionary, the keys become additional variables available for templating.
+ default: {}
+ version_added: '2.3'
+ type: dict
+ comment_start_string:
+ description: The string marking the beginning of a comment statement.
+ version_added: '2.12'
+ type: str
+ comment_end_string:
+ description: The string marking the end of a comment statement.
+ version_added: '2.12'
+ type: str
+"""
+
+EXAMPLES = """
+- name: show templating results
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.template', './some_template.j2') }}"
+
+- name: show templating results with different variable start and end string
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.template', './some_template.j2', variable_start_string='[%', variable_end_string='%]') }}"
+
+- name: show templating results with different comment start and end string
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.template', './some_template.j2', comment_start_string='[#', comment_end_string='#]') }}"
+"""
+
+RETURN = """
+_raw:
+ description: file(s) content after templating
+ type: list
+ elements: raw
+"""
+
+from copy import deepcopy
+import os
+
+import ansible.constants as C
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.template import generate_ansible_template_vars, AnsibleEnvironment
+from ansible.utils.display import Display
+from ansible.utils.native_jinja import NativeJinjaText
+
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ ret = []
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ # capture options
+ convert_data_p = self.get_option('convert_data')
+ lookup_template_vars = self.get_option('template_vars')
+ jinja2_native = self.get_option('jinja2_native') and C.DEFAULT_JINJA2_NATIVE
+ variable_start_string = self.get_option('variable_start_string')
+ variable_end_string = self.get_option('variable_end_string')
+ comment_start_string = self.get_option('comment_start_string')
+ comment_end_string = self.get_option('comment_end_string')
+
+ if jinja2_native:
+ templar = self._templar
+ else:
+ templar = self._templar.copy_with_new_env(environment_class=AnsibleEnvironment)
+
+ for term in terms:
+ display.debug("File lookup term: %s" % term)
+
+ lookupfile = self.find_file_in_search_path(variables, 'templates', term)
+ display.vvvv("File lookup using %s as file" % lookupfile)
+ if lookupfile:
+ b_template_data, show_data = self._loader._get_file_contents(lookupfile)
+ template_data = to_text(b_template_data, errors='surrogate_or_strict')
+
+ # set jinja2 internal search path for includes
+ searchpath = variables.get('ansible_search_path', [])
+ if searchpath:
+ # our search paths aren't actually the proper ones for jinja includes.
+ # We want to search into the 'templates' subdir of each search path in
+ # addition to our original search paths.
+ newsearchpath = []
+ for p in searchpath:
+ newsearchpath.append(os.path.join(p, 'templates'))
+ newsearchpath.append(p)
+ searchpath = newsearchpath
+ searchpath.insert(0, os.path.dirname(lookupfile))
+
+ # The template will have access to all existing variables,
+ # plus some added by ansible (e.g., template_{path,mtime}),
+ # plus anything passed to the lookup with the template_vars=
+ # argument.
+ vars = deepcopy(variables)
+ vars.update(generate_ansible_template_vars(term, lookupfile))
+ vars.update(lookup_template_vars)
+
+ with templar.set_temporary_context(variable_start_string=variable_start_string,
+ variable_end_string=variable_end_string,
+ comment_start_string=comment_start_string,
+ comment_end_string=comment_end_string,
+ available_variables=vars, searchpath=searchpath):
+ res = templar.template(template_data, preserve_trailing_newlines=True,
+ convert_data=convert_data_p, escape_backslashes=False)
+
+ if (C.DEFAULT_JINJA2_NATIVE and not jinja2_native) or not convert_data_p:
+ # jinja2_native is true globally but off for the lookup, we need this text
+ # not to be processed by literal_eval anywhere in Ansible
+ res = NativeJinjaText(res)
+
+ ret.append(res)
+ else:
+ raise AnsibleError("the template file %s could not be found for the lookup" % term)
+
+ return ret
diff --git a/lib/ansible/plugins/lookup/together.py b/lib/ansible/plugins/lookup/together.py
new file mode 100644
index 0000000..c990e06
--- /dev/null
+++ b/lib/ansible/plugins/lookup/together.py
@@ -0,0 +1,68 @@
+# (c) 2013, Bradley Young <young.bradley@gmail.com>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: together
+ author: Bradley Young (!UNKNOWN) <young.bradley@gmail.com>
+ version_added: '1.3'
+ short_description: merges lists into synchronized list
+ description:
+ - Creates a list with the iterated elements of the supplied lists
+ - "To clarify with an example, [ 'a', 'b' ] and [ 1, 2 ] turn into [ ('a',1), ('b', 2) ]"
+ - This is basically the same as the 'zip_longest' filter and Python function
+ - Any 'unbalanced' elements will be substituted with 'None'
+ options:
+ _terms:
+ description: list of lists to merge
+ required: True
+"""
+
+EXAMPLES = """
+- name: item.0 returns from the 'a' list, item.1 returns from the '1' list
+ ansible.builtin.debug:
+ msg: "{{ item.0 }} and {{ item.1 }}"
+ with_together:
+ - ['a', 'b', 'c', 'd']
+ - [1, 2, 3, 4]
+"""
+
+RETURN = """
+ _list:
+ description: synchronized list
+ type: list
+ elements: list
+"""
+import itertools
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+class LookupModule(LookupBase):
+ """
+ Transpose a list of arrays:
+ [1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
+ Replace any empty spots in 2nd array with None:
+ [1, 2], [3] -> [1, 3], [2, None]
+ """
+
+ def _lookup_variables(self, terms):
+ results = []
+ for x in terms:
+ intermediate = listify_lookup_plugin_terms(x, templar=self._templar)
+ results.append(intermediate)
+ return results
+
+ def run(self, terms, variables=None, **kwargs):
+
+ terms = self._lookup_variables(terms)
+
+ my_list = terms[:]
+ if len(my_list) == 0:
+ raise AnsibleError("with_together requires at least one element in each list")
+
+ return [self._flatten(x) for x in itertools.zip_longest(*my_list, fillvalue=None)]
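+
+# Illustrative call (hypothetical terms): with_together over ['a', 'b', 'c']
+# and [1, 2] yields [['a', 1], ['b', 2], ['c', None]]; zip_longest pads the
+# shorter list with None and _flatten turns each resulting tuple into a list.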
diff --git a/lib/ansible/plugins/lookup/unvault.py b/lib/ansible/plugins/lookup/unvault.py
new file mode 100644
index 0000000..a9b7168
--- /dev/null
+++ b/lib/ansible/plugins/lookup/unvault.py
@@ -0,0 +1,63 @@
+# (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: unvault
+ author: Ansible Core Team
+ version_added: "2.10"
+ short_description: read vaulted file(s) contents
+ description:
+ - This lookup returns the contents from vaulted (or not) file(s) on the Ansible controller's file system.
+ options:
+ _terms:
+ description: path(s) of files to read
+ required: True
+ notes:
+ - This lookup does not understand 'globbing' nor shell environment variables.
+"""
+
+EXAMPLES = """
+- ansible.builtin.debug: msg="the value of foo.txt is {{ lookup('ansible.builtin.unvault', '/etc/foo.txt') | string | trim }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - content of file(s) as bytes
+ type: list
+ elements: raw
+"""
+
+from ansible.errors import AnsibleParserError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_text
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ ret = []
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ for term in terms:
+ display.debug("Unvault lookup term: %s" % term)
+
+ # Find the file in the expected search path
+ lookupfile = self.find_file_in_search_path(variables, 'files', term)
+ display.vvvv(u"Unvault lookup found %s" % lookupfile)
+ if lookupfile:
+ actual_file = self._loader.get_real_file(lookupfile, decrypt=True)
+ with open(actual_file, 'rb') as f:
+ b_contents = f.read()
+ ret.append(to_text(b_contents))
+ else:
+ raise AnsibleParserError('Unable to find file matching "%s" ' % term)
+
+ return ret
diff --git a/lib/ansible/plugins/lookup/url.py b/lib/ansible/plugins/lookup/url.py
new file mode 100644
index 0000000..6790e1c
--- /dev/null
+++ b/lib/ansible/plugins/lookup/url.py
@@ -0,0 +1,264 @@
+# (c) 2015, Brian Coca <bcoca@ansible.com>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+name: url
+author: Brian Coca (@bcoca)
+version_added: "1.9"
+short_description: return contents from URL
+description:
+ - Returns the content of the requested URL, for use as data in a play.
+options:
+ _terms:
+ description: urls to query
+ validate_certs:
+ description: Flag to control SSL certificate validation
+ type: boolean
+ default: True
+ split_lines:
+ description: Flag to control if content is returned as a list of lines or as a single text blob
+ type: boolean
+ default: True
+ use_proxy:
+ description: Flag to control if the lookup will observe HTTP proxy environment variables when present.
+ type: boolean
+ default: True
+ username:
+ description: Username to use for HTTP authentication.
+ type: string
+ version_added: "2.8"
+ password:
+ description: Password to use for HTTP authentication.
+ type: string
+ version_added: "2.8"
+ headers:
+ description: HTTP request headers
+ type: dictionary
+ default: {}
+ version_added: "2.9"
+ force:
+ description: Whether or not to set "cache-control" header with value "no-cache"
+ type: boolean
+ version_added: "2.10"
+ default: False
+ vars:
+ - name: ansible_lookup_url_force
+ env:
+ - name: ANSIBLE_LOOKUP_URL_FORCE
+ ini:
+ - section: url_lookup
+ key: force
+ timeout:
+ description: How long to wait for the server to send data before giving up
+ type: float
+ version_added: "2.10"
+ default: 10
+ vars:
+ - name: ansible_lookup_url_timeout
+ env:
+ - name: ANSIBLE_LOOKUP_URL_TIMEOUT
+ ini:
+ - section: url_lookup
+ key: timeout
+ http_agent:
+ description: User-Agent to use in the request. The default was changed in 2.11 to C(ansible-httpget).
+ type: string
+ version_added: "2.10"
+ default: ansible-httpget
+ vars:
+ - name: ansible_lookup_url_agent
+ env:
+ - name: ANSIBLE_LOOKUP_URL_AGENT
+ ini:
+ - section: url_lookup
+ key: agent
+ force_basic_auth:
+ description: Force basic authentication
+ type: boolean
+ version_added: "2.10"
+ default: False
+ vars:
+ - name: ansible_lookup_url_force_basic_auth
+ env:
+ - name: ANSIBLE_LOOKUP_URL_FORCE_BASIC_AUTH
+ ini:
+ - section: url_lookup
+ key: force_basic_auth
+ follow_redirects:
+ description: One of C(urllib2), C(all)/C(yes), C(safe) or C(none), determining how redirects are followed; see RedirectHandlerFactory for more information
+ type: string
+ version_added: "2.10"
+ default: 'urllib2'
+ vars:
+ - name: ansible_lookup_url_follow_redirects
+ env:
+ - name: ANSIBLE_LOOKUP_URL_FOLLOW_REDIRECTS
+ ini:
+ - section: url_lookup
+ key: follow_redirects
+ use_gssapi:
+ description:
+ - Use GSSAPI handler of requests
+ - As of Ansible 2.11, GSSAPI credentials can be specified with I(username) and I(password).
+ type: boolean
+ version_added: "2.10"
+ default: False
+ vars:
+ - name: ansible_lookup_url_use_gssapi
+ env:
+ - name: ANSIBLE_LOOKUP_URL_USE_GSSAPI
+ ini:
+ - section: url_lookup
+ key: use_gssapi
+ use_netrc:
+ description:
+ - Determines whether to use credentials from the ``~/.netrc`` file
+ - By default .netrc is used with Basic authentication headers
+ - When set to False, .netrc credentials are ignored
+ type: boolean
+ version_added: "2.14"
+ default: True
+ vars:
+ - name: ansible_lookup_url_use_netrc
+ env:
+ - name: ANSIBLE_LOOKUP_URL_USE_NETRC
+ ini:
+ - section: url_lookup
+ key: use_netrc
+ unix_socket:
+ description: String of file system path to unix socket file to use when establishing connection to the provided url
+ type: string
+ version_added: "2.10"
+ vars:
+ - name: ansible_lookup_url_unix_socket
+ env:
+ - name: ANSIBLE_LOOKUP_URL_UNIX_SOCKET
+ ini:
+ - section: url_lookup
+ key: unix_socket
+ ca_path:
+ description: String of file system path to CA cert bundle to use
+ type: string
+ version_added: "2.10"
+ vars:
+ - name: ansible_lookup_url_ca_path
+ env:
+ - name: ANSIBLE_LOOKUP_URL_CA_PATH
+ ini:
+ - section: url_lookup
+ key: ca_path
+ unredirected_headers:
+ description: A list of headers to not attach on a redirected request
+ type: list
+ elements: string
+ version_added: "2.10"
+ vars:
+ - name: ansible_lookup_url_unredir_headers
+ env:
+ - name: ANSIBLE_LOOKUP_URL_UNREDIR_HEADERS
+ ini:
+ - section: url_lookup
+ key: unredirected_headers
+ ciphers:
+ description:
+ - SSL/TLS Ciphers to use for the request
+ - 'When a list is provided, all ciphers are joined in order with C(:)'
+ - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
+ for more details.
+ - The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions
+ type: list
+ elements: string
+ version_added: '2.14'
+ vars:
+ - name: ansible_lookup_url_ciphers
+ env:
+ - name: ANSIBLE_LOOKUP_URL_CIPHERS
+ ini:
+ - section: url_lookup
+ key: ciphers
+"""
+
+EXAMPLES = """
+- name: url lookup splits lines by default
+ ansible.builtin.debug: msg="{{item}}"
+ loop: "{{ lookup('ansible.builtin.url', 'https://github.com/gremlin.keys', wantlist=True) }}"
+
+- name: display ip ranges
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.url', 'https://ip-ranges.amazonaws.com/ip-ranges.json', split_lines=False) }}"
+
+- name: url lookup using authentication
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.url', 'https://some.private.site.com/file.txt', username='bob', password='hunter2') }}"
+
+- name: url lookup using basic authentication
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.url', 'https://some.private.site.com/file.txt', username='bob', password='hunter2', force_basic_auth='True') }}"
+
+- name: url lookup using headers
+ ansible.builtin.debug:
+ msg: "{{ lookup('ansible.builtin.url', 'https://some.private.site.com/api/service', headers={'header1':'value1', 'header2':'value2'} ) }}"
+"""
+
+RETURN = """
+ _list:
+ description: list of list of lines or content of url(s)
+ type: list
+ elements: str
+"""
+
+from urllib.error import HTTPError, URLError
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ ret = []
+ for term in terms:
+ display.vvvv("url lookup connecting to %s" % term)
+ try:
+ response = open_url(
+ term, validate_certs=self.get_option('validate_certs'),
+ use_proxy=self.get_option('use_proxy'),
+ url_username=self.get_option('username'),
+ url_password=self.get_option('password'),
+ headers=self.get_option('headers'),
+ force=self.get_option('force'),
+ timeout=self.get_option('timeout'),
+ http_agent=self.get_option('http_agent'),
+ force_basic_auth=self.get_option('force_basic_auth'),
+ follow_redirects=self.get_option('follow_redirects'),
+ use_gssapi=self.get_option('use_gssapi'),
+ unix_socket=self.get_option('unix_socket'),
+ ca_path=self.get_option('ca_path'),
+ unredirected_headers=self.get_option('unredirected_headers'),
+ ciphers=self.get_option('ciphers'),
+ use_netrc=self.get_option('use_netrc')
+ )
+ except HTTPError as e:
+ raise AnsibleError("Received HTTP error for %s : %s" % (term, to_native(e)))
+ except URLError as e:
+ raise AnsibleError("Failed lookup url for %s : %s" % (term, to_native(e)))
+ except SSLValidationError as e:
+ raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, to_native(e)))
+ except ConnectionError as e:
+ raise AnsibleError("Error connecting to %s: %s" % (term, to_native(e)))
+
+ if self.get_option('split_lines'):
+ for line in response.read().splitlines():
+ ret.append(to_text(line))
+ else:
+ ret.append(to_text(response.read()))
+ return ret
diff --git a/lib/ansible/plugins/lookup/varnames.py b/lib/ansible/plugins/lookup/varnames.py
new file mode 100644
index 0000000..442b81b
--- /dev/null
+++ b/lib/ansible/plugins/lookup/varnames.py
@@ -0,0 +1,79 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: varnames
+ author: Ansible Core Team
+ version_added: "2.8"
+ short_description: Lookup matching variable names
+ description:
+ - Retrieves a list of matching Ansible variable names.
+ options:
+ _terms:
+ description: List of Python regex patterns to search for in variable names.
+ required: True
+"""
+
+EXAMPLES = """
+- name: List variables that start with qz_
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.varnames', '^qz_.+')}}"
+ vars:
+ qz_1: hello
+ qz_2: world
+ qa_1: "I won't show"
+ qz_: "I won't show either"
+
+- name: Show all variables
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.varnames', '.+')}}"
+
+- name: Show variables with 'hosts' in their names
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.varnames', 'hosts')}}"
+
+- name: Find several related variables that end specific way
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.varnames', '.+_zone$', '.+_location$') }}"
+
+"""
+
+RETURN = """
+_value:
+ description:
+ - List of the variable names requested.
+ type: list
+"""
+
+import re
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ if variables is None:
+ raise AnsibleError('No variables available to search')
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ ret = []
+ variable_names = list(variables.keys())
+ for term in terms:
+
+ if not isinstance(term, string_types):
+ raise AnsibleError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))
+
+ try:
+ name = re.compile(term)
+ except Exception as e:
+ raise AnsibleError('Unable to use "%s" as a search parameter: %s' % (term, to_native(e)))
+
+ for varname in variable_names:
+ if name.search(varname):
+ ret.append(varname)
+
+ return ret
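+
+# Illustrative call (hypothetical variables): with qz_1 and qz_2 defined,
+# lookup('varnames', '^qz_.+') returns ['qz_1', 'qz_2']; terms are matched
+# with re.search(), so unanchored patterns match anywhere in the name.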
diff --git a/lib/ansible/plugins/lookup/vars.py b/lib/ansible/plugins/lookup/vars.py
new file mode 100644
index 0000000..dd5f763
--- /dev/null
+++ b/lib/ansible/plugins/lookup/vars.py
@@ -0,0 +1,106 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: vars
+ author: Ansible Core Team
+ version_added: "2.5"
+ short_description: Lookup templated value of variables
+ description:
+ - 'Retrieves the value of an Ansible variable. Note: Only returns top level variable names.'
+ options:
+ _terms:
+ description: The variable names to look up.
+ required: True
+ default:
+ description:
+ - What to return if a variable is undefined.
+ - If no default is set, it will result in an error if any of the variables is undefined.
+"""
+
+EXAMPLES = """
+- name: Show value of 'variablename'
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'variabl' + myvar) }}"
+ vars:
+ variablename: hello
+ myvar: ename
+
+- name: Show default empty since I don't have 'variablnotename'
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'variabl' + myvar, default='')}}"
+ vars:
+ variablename: hello
+ myvar: notename
+
+- name: Produce an error since I don't have 'variablnotename'
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'variabl' + myvar)}}"
+ ignore_errors: True
+ vars:
+ variablename: hello
+ myvar: notename
+
+- name: find several related variables
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'ansible_play_hosts', 'ansible_play_batch', 'ansible_play_hosts_all') }}"
+
+- name: Access nested variables
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'variabl' + myvar).sub_var }}"
+ ignore_errors: True
+ vars:
+ variablename:
+ sub_var: 12
+ myvar: ename
+
+- name: alternate way to find some 'prefixed vars' in loop
+ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.vars', 'ansible_play_' + item) }}"
+ loop:
+ - hosts
+ - batch
+ - hosts_all
+"""
+
+RETURN = """
+_value:
+ description:
+ - value of the variables requested.
+ type: list
+ elements: raw
+"""
+
+from ansible.errors import AnsibleError, AnsibleUndefinedVariable
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ if variables is not None:
+ self._templar.available_variables = variables
+ myvars = getattr(self._templar, '_available_variables', {})
+
+ self.set_options(var_options=variables, direct=kwargs)
+ default = self.get_option('default')
+
+ ret = []
+ for term in terms:
+ if not isinstance(term, string_types):
+ raise AnsibleError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))
+
+ try:
+ try:
+ value = myvars[term]
+ except KeyError:
+ try:
+ value = myvars['hostvars'][myvars['inventory_hostname']][term]
+ except KeyError:
+ raise AnsibleUndefinedVariable('No variable found with this name: %s' % term)
+
+ ret.append(self._templar.template(value, fail_on_undefined=True))
+ except AnsibleUndefinedVariable:
+ if default is not None:
+ ret.append(default)
+ else:
+ raise
+
+ return ret
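+
+# Illustrative lookups (hypothetical variables): lookup('vars', 'variablename')
+# templates and returns the value of that variable; for an unknown name the
+# 'default' option is returned if set, otherwise AnsibleUndefinedVariable
+# propagates as an error.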
diff --git a/lib/ansible/plugins/netconf/__init__.py b/lib/ansible/plugins/netconf/__init__.py
new file mode 100644
index 0000000..e99efbd
--- /dev/null
+++ b/lib/ansible/plugins/netconf/__init__.py
@@ -0,0 +1,375 @@
+#
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import abstractmethod
+from functools import wraps
+
+from ansible.errors import AnsibleError
+from ansible.plugins import AnsiblePlugin
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import missing_required_lib
+
+try:
+ from ncclient.operations import RPCError
+ from ncclient.xml_ import to_xml, to_ele, NCElement
+ HAS_NCCLIENT = True
+ NCCLIENT_IMP_ERR = None
+# paramiko and gssapi are incompatible and raise AttributeError not ImportError
+# When running in FIPS mode, cryptography raises InternalError
+# https://bugzilla.redhat.com/show_bug.cgi?id=1778939
+except Exception as err:
+ HAS_NCCLIENT = False
+ NCCLIENT_IMP_ERR = err
+
+try:
+ from lxml.etree import Element, SubElement, tostring, fromstring
+except ImportError:
+ from xml.etree.ElementTree import Element, SubElement, tostring, fromstring
+
+
+def ensure_ncclient(func):
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ if not HAS_NCCLIENT:
+ raise AnsibleError("%s: %s" % (missing_required_lib('ncclient'), to_native(NCCLIENT_IMP_ERR)))
+ return func(self, *args, **kwargs)
+ return wrapped
+
+
+class NetconfBase(AnsiblePlugin):
+ """
+ A base class for implementing Netconf connections
+
+ .. note:: Unlike most of Ansible, nearly all strings in
+ :class:`TerminalBase` plugins are byte strings. This is because of
+ how close to the underlying platform these plugins operate. Remember
+ to mark literal strings as byte string (``b"string"``) and to use
+ :func:`~ansible.module_utils._text.to_bytes` and
+ :func:`~ansible.module_utils._text.to_text` to avoid unexpected
+ problems.
+
+ List of supported rpc's:
+ :get: Retrieves running configuration and device state information
+ :get_config: Retrieves the specified configuration from the device
+ :edit_config: Loads the specified commands into the remote device
+ :commit: Load configuration from candidate to running
+ :discard_changes: Discard changes to candidate datastore
+ :validate: Validate the contents of the specified configuration.
+ :lock: Allows the client to lock the configuration system of a device.
+ :unlock: Release a configuration lock, previously obtained with the lock operation.
+ :copy_config: create or replace an entire configuration datastore with the contents of another complete
+ configuration datastore.
+ :get_schema: Retrieves the required schema from the device
+ :get_capabilities: Retrieves device information and supported rpc methods
+
+ For JUNOS:
+ :execute_rpc: RPC to be executed on the remote device
+ :load_configuration: Loads given configuration on device
+
+ Note: rpc support depends on the capabilities of remote device.
+
+ :returns: Returns output received from remote device as byte string
+ Note: the 'result' or 'error' from the response should be converted to an object
+ of ElementTree using 'fromstring' to parse output as xml doc
+
+ 'get_capabilities()' returns 'result' as a json string.
+
+ Usage:
+ from ansible.module_utils.connection import Connection
+
+ conn = Connection()
+ data = conn.execute_rpc(rpc)
+ reply = fromstring(reply)
+
+ data = conn.get_capabilities()
+ json.loads(data)
+
+ conn.load_configuration(config=['set system ntp server 1.1.1.1'], action='set', format='text')
+ """
+
+ __rpc__ = ['rpc', 'get_config', 'get', 'edit_config', 'validate', 'copy_config', 'dispatch', 'lock', 'unlock',
+ 'discard_changes', 'commit', 'get_schema', 'delete_config', 'get_device_operations']
+
+ def __init__(self, connection):
+ super(NetconfBase, self).__init__()
+ self._connection = connection
+
+ @property
+ def m(self):
+ return self._connection.manager
+
+ def rpc(self, name):
+ """
+ RPC to be executed on the remote device
+ :param name: Name of rpc in string format
+ :return: Received rpc response from remote host
+ """
+ try:
+ obj = to_ele(name)
+ resp = self.m.rpc(obj)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+ except RPCError as exc:
+ msg = exc.xml
+ raise Exception(to_xml(msg))
+
+ def get_config(self, source=None, filter=None):
+ """
+ Retrieve all or part of a specified configuration
+ (by default entire configuration is retrieved).
+ :param source: Name of the configuration datastore being queried, defaults to running datastore
+ :param filter: This argument specifies the portion of the configuration data to retrieve
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ if isinstance(filter, list):
+ filter = tuple(filter)
+
+ if not source:
+ source = 'running'
+ resp = self.m.get_config(source=source, filter=filter)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
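+
+ # Illustrative call (a sketch; 'nc' is a NetconfBase subclass instance, the
+ # ('subtree', xml) filter tuple follows ncclient's convention, and the
+ # candidate datastore depends on device capabilities):
+ #
+ #   reply = nc.get_config(source='candidate', filter=('subtree', '<interfaces/>'))
+ #   config = fromstring(reply)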
+
+ def get(self, filter=None, with_defaults=None):
+ """
+ Retrieve device configuration and state information.
+ :param filter: This argument specifies the portion of the state data to retrieve
+ (by default entire state data is retrieved)
+ :param with_defaults: defines an explicit method of retrieving default values
+ from the configuration
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ if isinstance(filter, list):
+ filter = tuple(filter)
+ resp = self.m.get(filter=filter, with_defaults=with_defaults)
+ response = resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+ return response
+
+ def edit_config(self, config=None, format='xml', target='candidate', default_operation=None, test_option=None, error_option=None):
+ """
+ Loads all or part of the specified *config* to the *target* configuration datastore.
+ :param config: Is the configuration, which must be rooted in the `config` element.
+ It can be specified either as a string or an :class:`~xml.etree.ElementTree.Element`.
+ :param format: The format of the configuration, e.g. xml or text
+ :param target: Is the name of the configuration datastore being edited
+ :param default_operation: If specified must be one of { `"merge"`, `"replace"`, or `"none"` }
+ :param test_option: If specified must be one of { `"test_then_set"`, `"set"` }
+ :param error_option: If specified must be one of { `"stop-on-error"`, `"continue-on-error"`, `"rollback-on-error"` }
+ The `"rollback-on-error"` *error_option* depends on the `:rollback-on-error` capability.
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ if config is None:
+ raise ValueError('config value must be provided')
+ resp = self.m.edit_config(config, format=format, target=target, default_operation=default_operation, test_option=test_option,
+ error_option=error_option)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
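+
+ # Illustrative call (a sketch; the snippet must be rooted in a 'config'
+ # element and ':candidate' support depends on the remote device):
+ #
+ #   snippet = '<config><system><host-name>r1</host-name></system></config>'
+ #   nc.edit_config(config=snippet, target='candidate', default_operation='merge')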
+
+ def validate(self, source='candidate'):
+ """
+ Validate the contents of the specified configuration.
+ :param source: Is the name of the configuration datastore being validated or `config` element
+ containing the configuration subtree to be validated
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.validate(source=source)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def copy_config(self, source, target):
+ """
+ Create or replace an entire configuration datastore with the contents of another complete configuration datastore.
+ :param source: Is the name of the configuration datastore to use as the source of the copy operation or `config`
+ element containing the configuration subtree to copy
+ :param target: Is the name of the configuration datastore to use as the destination of the copy operation
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.copy_config(source, target)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def dispatch(self, rpc_command=None, source=None, filter=None):
+ """
+ Execute an RPC on the remote device, e.g. dispatch('clear-arp-table')
+ :param rpc_command: specifies rpc command to be dispatched either in plain text or in xml element format (depending on command)
+ :param source: name of the configuration datastore being queried
+ :param filter: specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ if rpc_command is None:
+ raise ValueError('rpc_command value must be provided')
+
+ resp = self.m.dispatch(fromstring(rpc_command), source=source, filter=filter)
+
+ if isinstance(resp, NCElement):
+ # In case xml reply is transformed or namespace is removed in
+ # ncclient device specific handler return modified xml response
+ result = resp.data_xml
+ elif hasattr(resp, 'data_ele') and resp.data_ele:
+ # if data node is present in xml response return the xml string
+ # with data node as root
+ result = resp.data_xml
+ else:
+ # return raw xml string received from host with rpc-reply as the root node
+ result = resp.xml
+
+ return result
+
+ def lock(self, target="candidate"):
+ """
+ Allows the client to lock the configuration system of a device.
+ :param target: is the name of the configuration datastore to lock,
+ defaults to candidate datastore
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.lock(target=target)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def unlock(self, target="candidate"):
+ """
+ Release a configuration lock, previously obtained with the lock operation.
+ :param target: is the name of the configuration datastore to unlock,
+ defaults to candidate datastore
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.unlock(target=target)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def discard_changes(self):
+ """
+ Revert the candidate configuration to the currently running configuration.
+ Any uncommitted changes are discarded.
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.discard_changes()
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def commit(self, confirmed=False, timeout=None, persist=None):
+ """
+ Commit the candidate configuration as the device's new current configuration.
+ Depends on the `:candidate` capability.
+ A confirmed commit (i.e. if *confirmed* is `True`) is reverted if there is no
+ follow-up commit within the *timeout* interval. If no timeout is specified, the
+ confirm timeout defaults to 600 seconds (10 minutes).
+ A confirming commit may have the *confirmed* parameter but this is not required.
+ Depends on the `:confirmed-commit` capability.
+ :param confirmed: whether this is a confirmed commit
+ :param timeout: specifies the confirm timeout in seconds
+ :param persist: make the confirmed commit survive a session termination,
+ and set a token on the ongoing confirmed commit
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.commit(confirmed=confirmed, timeout=timeout, persist=persist)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
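+
+ # Typical confirmed-commit flow (illustrative; needs the :confirmed-commit
+ # capability, and ncclient expects 'timeout' as a string of seconds):
+ #
+ #   nc.commit(confirmed=True, timeout='120')  # start the confirm timer
+ #   # ... verify the device is still reachable ...
+ #   nc.commit()                               # confirming commit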
+
+ def get_schema(self, identifier=None, version=None, format=None):
+ """
+ Retrieve a named schema, with optional revision and type.
+ :param identifier: name of the schema to be retrieved
+ :param version: version of schema to get
+ :param format: format of the schema to be retrieved, yang is the default
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.get_schema(identifier, version=version, format=format)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def delete_config(self, target):
+ """
+ delete a configuration datastore
+ :param target: specifies the name or URL of configuration datastore to delete
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.delete_config(target)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def locked(self, target):
+ return self.m.locked(target)
+
+ @abstractmethod
+ def get_capabilities(self):
+ """
+ Retrieves device information and the rpc methods supported by the
+ device platform, and returns the result as a string
+ :return: Netconf session capability
+ """
+ pass
+
+ @staticmethod
+ def guess_network_os(obj):
+ """
+ Identifies the operating system of the network device.
+ :param obj: ncclient manager connection instance
+ :return: The name of the network operating system.
+ """
+ pass
+
+ def get_base_rpc(self):
+ """
+ Returns the list of base RPC methods supported by the remote device
+ :return: List of supported RPCs
+ """
+ return self.__rpc__
+
+ def put_file(self, source, destination):
+ """
+ Copies file to remote host
+ :param source: Source location of file
+ :param destination: Destination file path
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ pass
+
+ def fetch_file(self, source, destination):
+ """
+ Fetch file from remote host
+ :param source: Source location of file
+ :param destination: Destination location of file
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ pass
+
+ def get_device_operations(self, server_capabilities):
+ """
+ Retrieve remote host capability from Netconf server hello message.
+ :param server_capabilities: Server capabilities received during Netconf session initialization
+ :return: Remote host capabilities in dictionary format
+ """
+ operations = {}
+ capabilities = '\n'.join(server_capabilities)
+ operations['supports_commit'] = ':candidate' in capabilities
+ operations['supports_defaults'] = ':with-defaults' in capabilities
+ operations['supports_confirm_commit'] = ':confirmed-commit' in capabilities
+ operations['supports_startup'] = ':startup' in capabilities
+ operations['supports_xpath'] = ':xpath' in capabilities
+ operations['supports_writable_running'] = ':writable-running' in capabilities
+ operations['supports_validate'] = ':validate' in capabilities
+
+ operations['lock_datastore'] = []
+ if operations['supports_writable_running']:
+ operations['lock_datastore'].append('running')
+
+ if operations['supports_commit']:
+ operations['lock_datastore'].append('candidate')
+
+ if operations['supports_startup']:
+ operations['lock_datastore'].append('startup')
+
+ operations['supports_lock'] = bool(operations['lock_datastore'])
+
+ return operations
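+
+ # Illustrative (a sketch): callers can gate optional behaviour on the
+ # returned matrix:
+ #
+ #   ops = nc.get_device_operations(nc.m.server_capabilities)
+ #   if ops['supports_commit']:
+ #       nc.commit()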
+
+# TODO: Restore .xml when ncclient supports it for all platforms
diff --git a/lib/ansible/plugins/shell/__init__.py b/lib/ansible/plugins/shell/__init__.py
new file mode 100644
index 0000000..d5db261
--- /dev/null
+++ b/lib/ansible/plugins/shell/__init__.py
@@ -0,0 +1,239 @@
+# (c) 2016 RedHat
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import os.path
+import random
+import re
+import shlex
+import time
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import text_type, string_types
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.plugins import AnsiblePlugin
+
+_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
+
+
+class ShellBase(AnsiblePlugin):
+ def __init__(self):
+
+ super(ShellBase, self).__init__()
+
+ self.env = {}
+ self.tmpdir = None
+ self.executable = None
+
+ def _normalize_system_tmpdirs(self):
+ # Normalize the tmp directory strings. We don't use expanduser/expandvars because those
+ # can vary between the remote user and the become user. Therefore the safest practice is
+ # for this to always be specified as full paths.
+ normalized_paths = [d.rstrip('/') for d in self.get_option('system_tmpdirs')]
+
+ # Make sure all system_tmpdirs are absolute otherwise they'd be relative to the login dir
+ # which is almost certainly going to fail in a cornercase.
+ if not all(os.path.isabs(d) for d in normalized_paths):
+ raise AnsibleError('The configured system_tmpdirs contains a relative path: {0}. All'
+ ' system_tmpdirs must be absolute'.format(to_native(normalized_paths)))
+
+ self.set_option('system_tmpdirs', normalized_paths)
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(ShellBase, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ # set env if needed, deal with environment's 'dual nature' list of dicts or dict
+ # TODO: config system should already resolve this so we should be able to just iterate over dicts
+ env = self.get_option('environment')
+ if isinstance(env, string_types):
+ raise AnsibleError('The "envirionment" keyword takes a list of dictionaries or a dictionary, not a string')
+ if not isinstance(env, Sequence):
+ env = [env]
+ for env_dict in env:
+ if not isinstance(env_dict, Mapping):
+ raise AnsibleError('The "envirionment" keyword takes a list of dictionaries (or single dictionary), but got a "%s" instead' % type(env_dict))
+ self.env.update(env_dict)
+
+ # We can remove the try: except in the future when we make ShellBase a proper subset of
+ # *all* shells. Right now powershell and third party shells which do not use the
+ # shell_common documentation fragment (and so do not have system_tmpdirs) will fail
+ try:
+ self._normalize_system_tmpdirs()
+ except KeyError:
+ pass
+
+ @staticmethod
+ def _generate_temp_dir_name():
+ return 'ansible-tmp-%s-%s-%s' % (time.time(), os.getpid(), random.randint(0, 2**48))
+
+ def env_prefix(self, **kwargs):
+ return ' '.join(['%s=%s' % (k, shlex.quote(text_type(v))) for k, v in kwargs.items()])
+
+ def join_path(self, *args):
+ return os.path.join(*args)
+
+ # some shells (e.g. powershell) are snooty about filenames/extensions; this lets the shell plugin have a say
+ def get_remote_filename(self, pathname):
+ base_name = os.path.basename(pathname.strip())
+ return base_name.strip()
+
+ def path_has_trailing_slash(self, path):
+ return path.endswith('/')
+
+ def chmod(self, paths, mode):
+ cmd = ['chmod', mode]
+ cmd.extend(paths)
+ cmd = [shlex.quote(c) for c in cmd]
+
+ return ' '.join(cmd)
+
+ def chown(self, paths, user):
+ cmd = ['chown', user]
+ cmd.extend(paths)
+ cmd = [shlex.quote(c) for c in cmd]
+
+ return ' '.join(cmd)
+
+ def chgrp(self, paths, group):
+ cmd = ['chgrp', group]
+ cmd.extend(paths)
+ cmd = [shlex.quote(c) for c in cmd]
+
+ return ' '.join(cmd)
+
+ def set_user_facl(self, paths, user, mode):
+ """Only sets acls for users as that's really all we need"""
+ cmd = ['setfacl', '-m', 'u:%s:%s' % (user, mode)]
+ cmd.extend(paths)
+ cmd = [shlex.quote(c) for c in cmd]
+
+ return ' '.join(cmd)
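+
+ # These helpers only build command strings for the connection plugin to run.
+ # For example (illustrative), chmod(['/tmp/x y'], '0600') returns:
+ #   chmod 0600 '/tmp/x y'
+ # with every token escaped by shlex.quote.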
+
+ def remove(self, path, recurse=False):
+ path = shlex.quote(path)
+ cmd = 'rm -f '
+ if recurse:
+ cmd += '-r '
+ return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL)
+
+ def exists(self, path):
+ cmd = ['test', '-e', shlex.quote(path)]
+ return ' '.join(cmd)
+
+ def mkdtemp(self, basefile=None, system=False, mode=0o700, tmpdir=None):
+ if not basefile:
+ basefile = self.__class__._generate_temp_dir_name()
+
+ # When system is specified we have to create this in a directory where
+ # other users can read and access the tmp directory.
+ # This is because we use system to create tmp dirs for unprivileged users who are
+ # sudo'ing to a second unprivileged user.
+ # The 'system_tmpdirs' setting defines directories we can use for this
+ # purpose; the defaults are /tmp and /var/tmp.
+ # So we only allow one of those locations if system=True, using the
+ # passed in tmpdir if it is valid or the first one from the setting if not.
+
+ if system:
+ if tmpdir:
+ tmpdir = tmpdir.rstrip('/')
+
+ if tmpdir in self.get_option('system_tmpdirs'):
+ basetmpdir = tmpdir
+ else:
+ basetmpdir = self.get_option('system_tmpdirs')[0]
+ else:
+ if tmpdir is None:
+ basetmpdir = self.get_option('remote_tmp')
+ else:
+ basetmpdir = tmpdir
+
+ basetmp = self.join_path(basetmpdir, basefile)
+
+ # use mkdir -p to ensure parents exist, but mkdir fullpath to ensure last one is created by us
+ cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmpdir, self._SHELL_SUB_RIGHT)
+ cmd += '%s mkdir %s echo %s %s' % (self._SHELL_AND, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
+ cmd += ' %s echo %s=%s echo %s %s' % (self._SHELL_AND, basefile, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
+
+ # change the umask in a subshell to achieve the desired mode
+ # also for directories created with `mkdir -p`
+ if mode:
+ tmp_umask = 0o777 & ~mode
+ cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT)
+
+ return cmd
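+
+ # For illustration, with the sh plugin's substitutions (_SHELL_SUB_LEFT='"`',
+ # _SHELL_AND='&&', _SHELL_GROUP_LEFT='(') a call such as
+ # mkdtemp(basefile='ansible-tmp-1', tmpdir='/tmp') composes roughly:
+ #   ( umask 77 && mkdir -p "` echo /tmp `" && mkdir "` echo /tmp/ansible-tmp-1 `" && echo ansible-tmp-1="` echo /tmp/ansible-tmp-1 `" )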
+
+ def expand_user(self, user_home_path, username=''):
+ ''' Return a command to expand tildes in a path
+
+ It can be either "~" or "~username". We just ignore $HOME
+ We use the POSIX definition of a username:
+ http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
+ http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
+
+ Falls back to 'current working directory' as we assume 'home is where the remote user ends up'
+ '''
+
+ # Check that the user_path to expand is safe
+ if user_home_path != '~':
+ if not _USER_HOME_PATH_RE.match(user_home_path):
+ # shlex.quote will make the shell return the string verbatim
+ user_home_path = shlex.quote(user_home_path)
+ elif username:
+ # if present the user name is appended to resolve "that user's home"
+ user_home_path += username
+
+ return 'echo %s' % user_home_path
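+
+ # Illustrative outputs: expand_user('~') -> "echo ~",
+ # expand_user('~', username='alice') -> "echo ~alice"; an unsafe value such
+ # as '~alice/../root' is shell-quoted so the remote shell echoes it back
+ # verbatim instead of expanding it.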
+
+ def pwd(self):
+ """Return the working directory after connecting"""
+ return 'echo %spwd%s' % (self._SHELL_SUB_LEFT, self._SHELL_SUB_RIGHT)
+
+ def build_module_command(self, env_string, shebang, cmd, arg_path=None):
+ # don't quote the cmd if it's an empty string, because this will break pipelining mode
+ if cmd.strip() != '':
+ cmd = shlex.quote(cmd)
+
+ cmd_parts = []
+ if shebang:
+ shebang = shebang.replace("#!", "").strip()
+ else:
+ shebang = ""
+ cmd_parts.extend([env_string.strip(), shebang, cmd])
+ if arg_path is not None:
+ cmd_parts.append(arg_path)
+ new_cmd = " ".join(cmd_parts)
+ return new_cmd
+
+ def append_command(self, cmd, cmd_to_append):
+ """Append an additional command if supported by the shell"""
+
+ if self._SHELL_AND:
+ cmd += ' %s %s' % (self._SHELL_AND, cmd_to_append)
+
+ return cmd
+
+ def wrap_for_exec(self, cmd):
+ """wrap script execution with any necessary decoration (eg '&' for quoted powershell script paths)"""
+ return cmd
+
+ def quote(self, cmd):
+ """Returns a shell-escaped string that can be safely used as one token in a shell command line"""
+ return shlex.quote(cmd)
diff --git a/lib/ansible/plugins/shell/cmd.py b/lib/ansible/plugins/shell/cmd.py
new file mode 100644
index 0000000..c1083dc
--- /dev/null
+++ b/lib/ansible/plugins/shell/cmd.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: cmd
+version_added: '2.8'
+short_description: Windows Command Prompt
+description:
+- Used with the 'ssh' connection plugin when no C(DefaultShell) has been set on the Windows host.
+extends_documentation_fragment:
+- shell_windows
+'''
+
+import re
+
+from ansible.plugins.shell.powershell import ShellModule as PSShellModule
+
+# these are the metachars that have a special meaning in cmd that we want to escape when quoting
+_find_unsafe = re.compile(r'[\s\(\)\%\!^\"\<\>\&\|]').search
+
+
+class ShellModule(PSShellModule):
+
+ # Common shell filenames that this plugin handles
+ COMPATIBLE_SHELLS = frozenset() # type: frozenset[str]
+ # Family of shells this plugin belongs to; must match the filename without extension
+ SHELL_FAMILY = 'cmd'
+
+ _SHELL_REDIRECT_ALLNULL = '>nul 2>&1'
+ _SHELL_AND = '&&'
+
+ # Used by various parts of Ansible to do Windows specific changes
+ _IS_WINDOWS = True
+
+ def quote(self, s):
+ # cmd does not support the single quotes that shlex.quote uses, so we need to override the
+ # quoting behaviour to better match cmd.exe.
+ # https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
+
+ # Return an empty argument
+ if not s:
+ return '""'
+
+ if _find_unsafe(s) is None:
+ return s
+
+ # Escape the metachars as we are quoting the string to stop cmd from interpreting that metachar. For example
+ # 'file &whoami.exe' would result in 'file $(whoami.exe)' instead of the literal string
+ # https://stackoverflow.com/questions/3411771/multiple-character-replace-with-python
+ for c in '^()%!"<>&|': # '^' must be the first char that we scan and replace
+ if c in s:
+ # I can't find any docs that explicitly say this but to escape ", it needs to be prefixed with \^.
+ s = s.replace(c, ("\\^" if c == '"' else "^") + c)
+
+ return '^"' + s + '^"'
diff --git a/lib/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py
new file mode 100644
index 0000000..de5e705
--- /dev/null
+++ b/lib/ansible/plugins/shell/powershell.py
@@ -0,0 +1,287 @@
+# Copyright (c) 2014, Chris Church <chris@ninemoreminutes.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: powershell
+version_added: historical
+short_description: Windows PowerShell
+description:
+- The only option when using 'winrm' or 'psrp' as a connection plugin.
+- Can also be used when using 'ssh' as a connection plugin and the C(DefaultShell) has been configured to PowerShell.
+extends_documentation_fragment:
+- shell_windows
+'''
+
+import base64
+import os
+import re
+import shlex
+import pkgutil
+import xml.etree.ElementTree as ET
+import ntpath
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.shell import ShellBase
+
+
+_common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted']
+
+
+def _parse_clixml(data, stream="Error"):
+ """
+ Takes a byte string like '#< CLIXML\r\n<Objs...' and extracts the stream
+ message encoded in the XML data. CLIXML is used by PowerShell to encode
+ multiple objects in stderr.
+ """
+ lines = []
+
+ # There are some scenarios where the stderr contains a nested CLIXML element like
+ # '#< CLIXML\r\n#< CLIXML\r\n<Objs>...</Objs><Objs>...</Objs>'.
+ # Parse each individual <Objs> element and add the error strings to our stderr list.
+ # https://github.com/ansible/ansible/issues/69550
+ while data:
+ end_idx = data.find(b"</Objs>") + 7
+ current_element = data[data.find(b"<Objs "):end_idx]
+ data = data[end_idx:]
+
+ clixml = ET.fromstring(current_element)
+ namespace_match = re.match(r'{(.*)}', clixml.tag)
+ namespace = "{%s}" % namespace_match.group(1) if namespace_match else ""
+
+ strings = clixml.findall("./%sS" % namespace)
+ lines.extend([e.text.replace('_x000D__x000A_', '') for e in strings if e.attrib.get('S') == stream])
+
+ return to_bytes('\r\n'.join(lines))
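+
+# Illustrative round trip (simplified; real payloads carry an xmlns namespace):
+#   _parse_clixml(b'#< CLIXML\r\n<Objs Version="1.1.0.1"><S S="Error">oops_x000D__x000A_</S></Objs>')
+# returns b'oops'.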
+
+
+class ShellModule(ShellBase):
+
+ # Common shell filenames that this plugin handles
+ # Powershell is handled differently. It's selected when winrm is the
+ # connection
+ COMPATIBLE_SHELLS = frozenset() # type: frozenset[str]
+ # Family of shells this plugin belongs to; must match the filename without extension
+ SHELL_FAMILY = 'powershell'
+
+ _SHELL_REDIRECT_ALLNULL = '> $null'
+ _SHELL_AND = ';'
+
+ # Used by various parts of Ansible to do Windows specific changes
+ _IS_WINDOWS = True
+
+ # TODO: add binary module support
+
+ def env_prefix(self, **kwargs):
+ # powershell/winrm env handling is handled in the exec wrapper
+ return ""
+
+ def join_path(self, *args):
+ # use normpath() to remove doubled slashes and convert forward slashes to backslashes
+ parts = [ntpath.normpath(self._unquote(arg)) for arg in args]
+
+ # Because ntpath.join treats any component that begins with a backslash as an absolute path,
+ # we have to strip slashes from at least the beginning, otherwise join will ignore all previous
+ # path components except for the drive.
+ return ntpath.join(parts[0], *[part.strip('\\') for part in parts[1:]])
+
+ def get_remote_filename(self, pathname):
+ # powershell requires that script files end with .ps1
+ base_name = os.path.basename(pathname.strip())
+ name, ext = os.path.splitext(base_name.strip())
+ if ext.lower() not in ['.ps1', '.exe']:
+ return name + '.ps1'
+
+ return base_name.strip()
+
+ def path_has_trailing_slash(self, path):
+ # Allow Windows paths to be specified using either slash.
+ path = self._unquote(path)
+ return path.endswith('/') or path.endswith('\\')
+
+ def chmod(self, paths, mode):
+ raise NotImplementedError('chmod is not implemented for Powershell')
+
+ def chown(self, paths, user):
+ raise NotImplementedError('chown is not implemented for Powershell')
+
+ def set_user_facl(self, paths, user, mode):
+ raise NotImplementedError('set_user_facl is not implemented for Powershell')
+
+ def remove(self, path, recurse=False):
+ path = self._escape(self._unquote(path))
+ if recurse:
+ return self._encode_script('''Remove-Item '%s' -Force -Recurse;''' % path)
+ else:
+ return self._encode_script('''Remove-Item '%s' -Force;''' % path)
+
+ def mkdtemp(self, basefile=None, system=False, mode=None, tmpdir=None):
+ # Windows does not have an equivalent for the system temp files, so
+ # the param is ignored
+ if not basefile:
+ basefile = self.__class__._generate_temp_dir_name()
+ basefile = self._escape(self._unquote(basefile))
+ basetmpdir = tmpdir if tmpdir else self.get_option('remote_tmp')
+
+ script = '''
+ $tmp_path = [System.Environment]::ExpandEnvironmentVariables('%s')
+ $tmp = New-Item -Type Directory -Path $tmp_path -Name '%s'
+ Write-Output -InputObject $tmp.FullName
+ ''' % (basetmpdir, basefile)
+ return self._encode_script(script.strip())
+
+ def expand_user(self, user_home_path, username=''):
+ # PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
+ # not seem to work remotely, though by default we are always starting
+ # in the user's home directory.
+ user_home_path = self._unquote(user_home_path)
+ if user_home_path == '~':
+ script = 'Write-Output (Get-Location).Path'
+ elif user_home_path.startswith('~\\'):
+ script = "Write-Output ((Get-Location).Path + '%s')" % self._escape(user_home_path[1:])
+ else:
+ script = "Write-Output '%s'" % self._escape(user_home_path)
+ return self._encode_script(script)
+
+ def exists(self, path):
+ path = self._escape(self._unquote(path))
+ script = '''
+ If (Test-Path '%s')
+ {
+ $res = 0;
+ }
+ Else
+ {
+ $res = 1;
+ }
+ Write-Output $res;
+ Exit $res;
+ ''' % path
+ return self._encode_script(script)
+
+ def checksum(self, path, *args, **kwargs):
+ path = self._escape(self._unquote(path))
+ script = '''
+ If (Test-Path -PathType Leaf '%(path)s')
+ {
+ $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
+ $fp = [System.IO.File]::Open('%(path)s', [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
+ [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
+ $fp.Dispose();
+ }
+ ElseIf (Test-Path -PathType Container '%(path)s')
+ {
+ Write-Output "3";
+ }
+ Else
+ {
+ Write-Output "1";
+ }
+ ''' % dict(path=path)
+ return self._encode_script(script)
+
+ def build_module_command(self, env_string, shebang, cmd, arg_path=None):
+ bootstrap_wrapper = pkgutil.get_data("ansible.executor.powershell", "bootstrap_wrapper.ps1")
+
+ # pipelining bypass
+ if cmd == '':
+ return self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
+
+ # non-pipelining
+
+ cmd_parts = shlex.split(cmd, posix=False)
+ cmd_parts = list(map(to_text, cmd_parts))
+ if shebang and shebang.lower() == '#!powershell':
+ if not self._unquote(cmd_parts[0]).lower().endswith('.ps1'):
+ # we're running a module via the bootstrap wrapper
+ cmd_parts[0] = '"%s.ps1"' % self._unquote(cmd_parts[0])
+ wrapper_cmd = "type " + cmd_parts[0] + " | " + self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
+ return wrapper_cmd
+ elif shebang and shebang.startswith('#!'):
+ cmd_parts.insert(0, shebang[2:])
+ elif not shebang:
+ # The module is assumed to be a binary
+ cmd_parts[0] = self._unquote(cmd_parts[0])
+ cmd_parts.append(arg_path)
+ script = '''
+ Try
+ {
+ %s
+ %s
+ }
+ Catch
+ {
+ $_obj = @{ failed = $true }
+ If ($_.Exception.GetType)
+ {
+ $_obj.Add('msg', $_.Exception.Message)
+ }
+ Else
+ {
+ $_obj.Add('msg', $_.ToString())
+ }
+ If ($_.InvocationInfo.PositionMessage)
+ {
+ $_obj.Add('exception', $_.InvocationInfo.PositionMessage)
+ }
+ ElseIf ($_.ScriptStackTrace)
+ {
+ $_obj.Add('exception', $_.ScriptStackTrace)
+ }
+ Try
+ {
+ $_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json))
+ }
+ Catch
+ {
+ }
+ Echo $_obj | ConvertTo-Json -Compress -Depth 99
+ Exit 1
+ }
+ ''' % (env_string, ' '.join(cmd_parts))
+ return self._encode_script(script, preserve_rc=False)
+
+ def wrap_for_exec(self, cmd):
+ return '& %s; exit $LASTEXITCODE' % cmd
+
+ def _unquote(self, value):
+ '''Remove any matching quotes that wrap the given value.'''
+ value = to_text(value or '')
+ m = re.match(r'^\s*?\'(.*?)\'\s*?$', value)
+ if m:
+ return m.group(1)
+ m = re.match(r'^\s*?"(.*?)"\s*?$', value)
+ if m:
+ return m.group(1)
+ return value
+
+ def _escape(self, value):
+ '''Return value escaped for use in PowerShell single quotes.'''
+ # There are 5 chars that need to be escaped in a single quote.
+ # https://github.com/PowerShell/PowerShell/blob/b7cb335f03fe2992d0cbd61699de9d9aafa1d7c1/src/System.Management.Automation/engine/parser/CharTraits.cs#L265-L272
+ return re.compile(u"(['\u2018\u2019\u201a\u201b])").sub(u'\\1\\1', value)
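+
+ # Illustrative: _escape("it's") returns "it''s"; each single-quote style
+ # character is doubled so the value is safe inside a PowerShell
+ # single-quoted string.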
+
+ def _encode_script(self, script, as_list=False, strict_mode=True, preserve_rc=True):
+ '''Convert a PowerShell script to a single base64-encoded command.'''
+ script = to_text(script)
+
+ if script == u'-':
+ cmd_parts = _common_args + ['-Command', '-']
+
+ else:
+ if strict_mode:
+ script = u'Set-StrictMode -Version Latest\r\n%s' % script
+ # try to propagate exit code if present- won't work with begin/process/end-style scripts (ala put_file)
+ # NB: the exit code returned may be incorrect in the case of a successful command followed by an invalid command
+ if preserve_rc:
+ script = u'%s\r\nIf (-not $?) { If (Get-Variable LASTEXITCODE -ErrorAction SilentlyContinue) { exit $LASTEXITCODE } Else { exit 1 } }\r\n'\
+ % script
+ script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
+ encoded_script = to_text(base64.b64encode(script.encode('utf-16-le')), 'utf-8')
+ cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
+
+ if as_list:
+ return cmd_parts
+ return ' '.join(cmd_parts)
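+
+ # Illustrative round trip (computed, not taken from this file's tests):
+ #   _encode_script('Get-Item C:\\tmp', strict_mode=False, preserve_rc=False)
+ # returns:
+ #   PowerShell -NoProfile -NonInteractive -ExecutionPolicy Unrestricted -EncodedCommand RwBlAHQALQBJAHQAZQBtACAAQwA6AFwAdABtAHAA
+ # i.e. the script base64-encoded over its UTF-16-LE bytes.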
diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py
new file mode 100644
index 0000000..146c466
--- /dev/null
+++ b/lib/ansible/plugins/shell/sh.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2014, Chris Church <chris@ninemoreminutes.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: sh
+short_description: "POSIX shell (/bin/sh)"
+version_added: historical
+description:
+ - This shell plugin is the one you want to use on most Unix systems; it is the most compatible and widely installed shell.
+extends_documentation_fragment:
+ - shell_common
+'''
+
+import shlex
+
+from ansible.plugins.shell import ShellBase
+
+
+class ShellModule(ShellBase):
+
+ # Common shell filenames that this plugin handles.
+ # Note: sh is the default shell plugin so this plugin may also be selected
+ # if the filename is not listed in any Shell plugin.
+ # This code needs to be SH-compliant. BASH-isms will not work if /bin/sh points to a non-BASH shell.
+ COMPATIBLE_SHELLS = frozenset(('sh', 'zsh', 'bash', 'dash', 'ksh'))
+ # Family of shells this plugin belongs to; must match the filename without extension
+ SHELL_FAMILY = 'sh'
+
+ # commonly used
+ ECHO = 'echo'
+ COMMAND_SEP = ';'
+
+ # How to end lines in a python script one-liner
+ _SHELL_EMBEDDED_PY_EOL = '\n'
+ _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
+ _SHELL_AND = '&&'
+ _SHELL_OR = '||'
+ _SHELL_SUB_LEFT = '"`'
+ _SHELL_SUB_RIGHT = '`"'
+ _SHELL_GROUP_LEFT = '('
+ _SHELL_GROUP_RIGHT = ')'
+
+ def checksum(self, path, python_interp):
+ # In the following test, each condition is a check and logical
+ # comparison (|| or &&) that sets the rc value. Every check is run so
+ # the last check in the series to fail will be the rc that is returned.
+ #
+ # If a check fails we error before invoking the hash functions because
+ # hash functions may successfully take the hash of a directory on BSDs
+ # (UFS filesystem?) which is not what the rest of the ansible code expects
+ #
+ # If all of the available hashing methods fail, we fall back to echoing
+ # '0 <path>' (unknown error); that fallback is appended to the cmd at the
+ # bottom of this function.
+
+ # Return codes:
+ # checksum: success!
+ # 0: Unknown error
+ # 1: Remote file does not exist
+ # 2: No read permissions on the file
+ # 3: File is a directory
+ # 4: No python interpreter
+
+ # Quoting gets complex here. We're writing a python string that's
+ # used by a variety of shells on the remote host to invoke a python
+ # "one-liner".
+ shell_escaped_path = shlex.quote(path)
+ test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR) # NOQA
+ csums = [
+ u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python > 2.4 (including python3)
+ u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python == 2.4
+ ]
+
+ cmd = (" %s " % self._SHELL_OR).join(csums)
+ cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path)
+ return cmd
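+
+ # The caller runs the returned pipeline and reads stdout: on success it is
+ # just the SHA-1 hex digest; on error it is "<rc> <path>", e.g.
+ # (illustrative) "1 /etc/motd" when the remote file does not exist.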
diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py
new file mode 100644
index 0000000..5cc05ee
--- /dev/null
+++ b/lib/ansible/plugins/strategy/__init__.py
@@ -0,0 +1,1202 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import cmd
+import functools
+import os
+import pprint
+import queue
+import sys
+import threading
+import time
+
+from collections import deque
+from multiprocessing import Lock
+
+from jinja2.exceptions import UndefinedError
+
+from ansible import constants as C
+from ansible import context
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleUndefinedVariable, AnsibleParserError
+from ansible.executor import action_write_locks
+from ansible.executor.play_iterator import IteratingStates
+from ansible.executor.process.worker import WorkerProcess
+from ansible.executor.task_result import TaskResult
+from ansible.executor.task_queue_manager import CallbackSend, DisplaySend
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text
+from ansible.module_utils.connection import Connection, ConnectionError
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.handler import Handler
+from ansible.playbook.helpers import load_list_of_blocks
+from ansible.playbook.task import Task
+from ansible.playbook.task_include import TaskInclude
+from ansible.plugins import loader as plugin_loader
+from ansible.template import Templar
+from ansible.utils.display import Display
+from ansible.utils.fqcn import add_internal_fqcns
+from ansible.utils.unsafe_proxy import wrap_var
+from ansible.utils.vars import combine_vars, isidentifier
+from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
+
+display = Display()
+
+__all__ = ['StrategyBase']
+
+# Entries in this list match fact names exactly or as a start-of-string
+# prefix; regexes are not accepted.
+ALWAYS_DELEGATE_FACT_PREFIXES = frozenset((
+ 'discovered_interpreter_',
+))
+
+
+class StrategySentinel:
+ pass
+
+
+_sentinel = StrategySentinel()
+
+
+def post_process_whens(result, task, templar, task_vars):
+ cond = None
+ if task.changed_when:
+ with templar.set_temporary_context(available_variables=task_vars):
+ cond = Conditional(loader=templar._loader)
+ cond.when = task.changed_when
+ result['changed'] = cond.evaluate_conditional(templar, templar.available_variables)
+
+ if task.failed_when:
+ with templar.set_temporary_context(available_variables=task_vars):
+ if cond is None:
+ cond = Conditional(loader=templar._loader)
+ cond.when = task.failed_when
+ failed_when_result = cond.evaluate_conditional(templar, templar.available_variables)
+ result['failed_when_result'] = result['failed'] = failed_when_result
+
+
+def _get_item_vars(result, task):
+ item_vars = {}
+ if task.loop or task.loop_with:
+ loop_var = result.get('ansible_loop_var', 'item')
+ index_var = result.get('ansible_index_var')
+ if loop_var in result:
+ item_vars[loop_var] = result[loop_var]
+ if index_var and index_var in result:
+ item_vars[index_var] = result[index_var]
+ if '_ansible_item_label' in result:
+ item_vars['_ansible_item_label'] = result['_ansible_item_label']
+ if 'ansible_loop' in result:
+ item_vars['ansible_loop'] = result['ansible_loop']
+ return item_vars
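+
+# Illustrative (for a looped task): given a per-item result such as
+# {'item': 'eth0', 'ansible_loop_var': 'item', '_ansible_item_label': 'eth0'},
+# _get_item_vars returns {'item': 'eth0', '_ansible_item_label': 'eth0'}.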
+
+
+def results_thread_main(strategy):
+ while True:
+ try:
+ result = strategy._final_q.get()
+ if isinstance(result, StrategySentinel):
+ break
+ elif isinstance(result, DisplaySend):
+ display.display(*result.args, **result.kwargs)
+ elif isinstance(result, CallbackSend):
+ for arg in result.args:
+ if isinstance(arg, TaskResult):
+ strategy.normalize_task_result(arg)
+ break
+ strategy._tqm.send_callback(result.method_name, *result.args, **result.kwargs)
+ elif isinstance(result, TaskResult):
+ strategy.normalize_task_result(result)
+ with strategy._results_lock:
+ strategy._results.append(result)
+ else:
+ display.warning('Received an invalid object (%s) in the result queue: %r' % (type(result), result))
+ except (IOError, EOFError):
+ break
+ except queue.Empty:
+ pass
+
+
+def debug_closure(func):
+ """Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
+ @functools.wraps(func)
+ def inner(self, iterator, one_pass=False, max_passes=None):
+ status_to_stats_map = (
+ ('is_failed', 'failures'),
+ ('is_unreachable', 'dark'),
+ ('is_changed', 'changed'),
+ ('is_skipped', 'skipped'),
+ )
+
+ # We don't know the host yet, so copy the previous states for lookup after we process new results
+ prev_host_states = iterator.host_states.copy()
+
+ results = func(self, iterator, one_pass=one_pass, max_passes=max_passes)
+ _processed_results = []
+
+ for result in results:
+ task = result._task
+ host = result._host
+ _queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
+ task_vars = _queued_task_args['task_vars']
+ play_context = _queued_task_args['play_context']
+ # Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
+ try:
+ prev_host_state = prev_host_states[host.name]
+ except KeyError:
+ prev_host_state = iterator.get_host_state(host)
+
+ while result.needs_debugger(globally_enabled=self.debugger_active):
+ next_action = NextAction()
+ dbg = Debugger(task, host, task_vars, play_context, result, next_action)
+ dbg.cmdloop()
+
+ if next_action.result == NextAction.REDO:
+ # rollback host state
+ self._tqm.clear_failed_hosts()
+ if task.run_once and iterator._play.strategy in add_internal_fqcns(('linear',)) and result.is_failed():
+ for host_name, state in prev_host_states.items():
+ if host_name == host.name:
+ continue
+ iterator.set_state_for_host(host_name, state)
+ iterator._play._removed_hosts.remove(host_name)
+ iterator.set_state_for_host(host.name, prev_host_state)
+ for method, what in status_to_stats_map:
+ if getattr(result, method)():
+ self._tqm._stats.decrement(what, host.name)
+ self._tqm._stats.decrement('ok', host.name)
+
+ # redo
+ self._queue_task(host, task, task_vars, play_context)
+
+ _processed_results.extend(debug_closure(func)(self, iterator, one_pass))
+ break
+ elif next_action.result == NextAction.CONTINUE:
+ _processed_results.append(result)
+ break
+ elif next_action.result == NextAction.EXIT:
+ # Matches KeyboardInterrupt from bin/ansible
+ sys.exit(99)
+ else:
+ _processed_results.append(result)
+
+ return _processed_results
+ return inner
+
+
+class StrategyBase:
+
+ '''
+ This is the base class for strategy plugins, which contains some common
+ code useful to all strategies like running handlers, cleanup actions, etc.
+ '''
+
+ # by default, strategies should support throttling, but we allow individual
+ # strategies to disable this and either forego supporting it or manage
+ # the throttling internally (as `free` does)
+ ALLOW_BASE_THROTTLING = True
+
+ def __init__(self, tqm):
+ self._tqm = tqm
+ self._inventory = tqm.get_inventory()
+ self._workers = tqm._workers
+ self._variable_manager = tqm.get_variable_manager()
+ self._loader = tqm.get_loader()
+ self._final_q = tqm._final_q
+ self._step = context.CLIARGS.get('step', False)
+ self._diff = context.CLIARGS.get('diff', False)
+
+ # the task cache is a dictionary of tuples of (host.name, task._uuid)
+ # used to find the original task object of in-flight tasks and to store
+ # the task args/vars and play context info used to queue the task.
+ self._queued_task_cache = {}
+
+ # Backwards compat: self._display isn't really needed, just import the global display and use that.
+ self._display = display
+
+ # internal counters
+ self._pending_results = 0
+ self._cur_worker = 0
+
+ # this dictionary is used to keep track of hosts that have
+ # outstanding tasks still in queue
+ self._blocked_hosts = dict()
+
+ self._results = deque()
+ self._results_lock = threading.Condition(threading.Lock())
+
+ # create the result processing thread for reading results in the background
+ self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
+ self._results_thread.daemon = True
+ self._results_thread.start()
+
+ # holds the list of active (persistent) connections to be shutdown at
+ # play completion
+ self._active_connections = dict()
+
+ # Caches for get_host calls, to avoid calling excessively
+ # These values should be set at the top of the ``run`` method of each
+ # strategy plugin. Use ``_set_hosts_cache`` to set these values
+ self._hosts_cache = []
+ self._hosts_cache_all = []
+
+ self.debugger_active = C.ENABLE_TASK_DEBUGGER
+
+ def _set_hosts_cache(self, play, refresh=True):
+ """Responsible for setting _hosts_cache and _hosts_cache_all
+
+ See comment in ``__init__`` for the purpose of these caches
+ """
+ if not refresh and all((self._hosts_cache, self._hosts_cache_all)):
+ return
+
+ if not play.finalized and Templar(None).is_template(play.hosts):
+ _pattern = 'all'
+ else:
+ _pattern = play.hosts or 'all'
+ self._hosts_cache_all = [h.name for h in self._inventory.get_hosts(pattern=_pattern, ignore_restrictions=True)]
+ self._hosts_cache = [h.name for h in self._inventory.get_hosts(play.hosts, order=play.order)]
+
+ def cleanup(self):
+ # close active persistent connections
+ for sock in self._active_connections.values():
+ try:
+ conn = Connection(sock)
+ conn.reset()
+ except ConnectionError as e:
+ # most likely socket is already closed
+ display.debug("got an error while closing persistent connection: %s" % e)
+ self._final_q.put(_sentinel)
+ self._results_thread.join()
+
+ def run(self, iterator, play_context, result=0):
+ # execute one more pass through the iterator without peeking, to
+ # make sure that all of the hosts are advanced to their final task.
+ # This should be safe, as everything should be IteratingStates.COMPLETE by
+ # this point, though the strategy may not advance the hosts itself.
+
+ for host in self._hosts_cache:
+ if host not in self._tqm._unreachable_hosts:
+ try:
+ iterator.get_next_task_for_host(self._inventory.hosts[host])
+ except KeyError:
+ iterator.get_next_task_for_host(self._inventory.get_host(host))
+
+ # return the appropriate code, depending on the status of the hosts after the run
+ if not isinstance(result, bool) and result != self._tqm.RUN_OK:
+ return result
+ elif len(self._tqm._unreachable_hosts.keys()) > 0:
+ return self._tqm.RUN_UNREACHABLE_HOSTS
+ elif len(iterator.get_failed_hosts()) > 0:
+ return self._tqm.RUN_FAILED_HOSTS
+ else:
+ return self._tqm.RUN_OK
+
+ def get_hosts_remaining(self, play):
+ self._set_hosts_cache(play, refresh=False)
+ ignore = set(self._tqm._failed_hosts).union(self._tqm._unreachable_hosts)
+ return [host for host in self._hosts_cache if host not in ignore]
+
+ def get_failed_hosts(self, play):
+ self._set_hosts_cache(play, refresh=False)
+ return [host for host in self._hosts_cache if host in self._tqm._failed_hosts]
+
+ def add_tqm_variables(self, vars, play):
+ '''
+ Base class method to add extra variables/information to the list of task
+ vars sent through the executor engine regarding the task queue manager state.
+ '''
+ vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
+ vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
+
+ def _queue_task(self, host, task, task_vars, play_context):
+ ''' handles queueing the task up to be sent to a worker '''
+
+ display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
+
+ # Add a write lock for tasks.
+ # Maybe this should be added somewhere further up the call stack but
+ # this is the earliest in the code where we have task (1) extracted
+ # into its own variable and (2) there's only a single code path
+ # leading to the module being run. This is called by two
+ # functions: linear.py::run(), and
+ # free.py::run() so we'd have to add to both to do it there.
+ # The next common higher level is __init__.py::run() and that has
+ # tasks inside of play_iterator so we'd have to extract them to do it
+ # there.
+
+ if task.action not in action_write_locks.action_write_locks:
+ display.debug('Creating lock for %s' % task.action)
+ action_write_locks.action_write_locks[task.action] = Lock()
+
+ # create a templar and template things we need later for the queuing process
+ templar = Templar(loader=self._loader, variables=task_vars)
+
+ try:
+ throttle = int(templar.template(task.throttle))
+ except Exception as e:
+ raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
+
+ # and then queue the new task
+ try:
+ # Determine the "rewind point" of the worker list. This means we start
+ # iterating over the list of workers until the end of the list is found.
+ # Normally, that is simply the length of the workers list (as determined
+ # by the forks or serial setting), however a task/block/play may "throttle"
+ # that limit down.
+ rewind_point = len(self._workers)
+ if throttle > 0 and self.ALLOW_BASE_THROTTLING:
+ if task.run_once:
+ display.debug("Ignoring 'throttle' as 'run_once' is also set for '%s'" % task.get_name())
+ else:
+ if throttle <= rewind_point:
+ display.debug("task: %s, throttle: %d" % (task.get_name(), throttle))
+ rewind_point = throttle
+
+ queued = False
+ starting_worker = self._cur_worker
+ while True:
+ if self._cur_worker >= rewind_point:
+ self._cur_worker = 0
+
+ worker_prc = self._workers[self._cur_worker]
+ if worker_prc is None or not worker_prc.is_alive():
+ self._queued_task_cache[(host.name, task._uuid)] = {
+ 'host': host,
+ 'task': task,
+ 'task_vars': task_vars,
+ 'play_context': play_context
+ }
+
+ worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, plugin_loader)
+ self._workers[self._cur_worker] = worker_prc
+ self._tqm.send_callback('v2_runner_on_start', host, task)
+ worker_prc.start()
+ display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
+ queued = True
+
+ self._cur_worker += 1
+
+ if self._cur_worker >= rewind_point:
+ self._cur_worker = 0
+
+ if queued:
+ break
+ elif self._cur_worker == starting_worker:
+ time.sleep(0.0001)
+
+ self._pending_results += 1
+ except (EOFError, IOError, AssertionError) as e:
+ # most likely an abort
+ display.debug("got an error while queuing: %s" % e)
+ return
+ display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
+
+ def get_task_hosts(self, iterator, task_host, task):
+ if task.run_once:
+ host_list = [host for host in self._hosts_cache if host not in self._tqm._unreachable_hosts]
+ else:
+ host_list = [task_host.name]
+ return host_list
+
+ def get_delegated_hosts(self, result, task):
+ host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
+ return [host_name or task.delegate_to]
+
+ def _set_always_delegated_facts(self, result, task):
+ """Sets host facts for ``delegate_to`` hosts for facts that should
+ always be delegated
+
+ This operation mutates ``result`` to remove the always delegated facts
+
+ See ``ALWAYS_DELEGATE_FACT_PREFIXES``
+ """
+ if task.delegate_to is None:
+ return
+
+ facts = result['ansible_facts']
+ always_keys = set()
+ _add = always_keys.add
+ for fact_key in facts:
+ for always_key in ALWAYS_DELEGATE_FACT_PREFIXES:
+ if fact_key.startswith(always_key):
+ _add(fact_key)
+ if always_keys:
+ _pop = facts.pop
+ always_facts = {
+ 'ansible_facts': dict((k, _pop(k)) for k in list(facts) if k in always_keys)
+ }
+ host_list = self.get_delegated_hosts(result, task)
+ _set_host_facts = self._variable_manager.set_host_facts
+ for target_host in host_list:
+ _set_host_facts(target_host, always_facts)
+
+ def normalize_task_result(self, task_result):
+ """Normalize a TaskResult to reference actual Host and Task objects
+ when only given the ``Host.name``, or the ``Task._uuid``
+
+ Only the ``Host.name`` and ``Task._uuid`` are commonly sent back from
+ the ``TaskExecutor`` or ``WorkerProcess`` due to performance concerns
+
+ Mutates the original object
+ """
+
+ if isinstance(task_result._host, string_types):
+ # If the value is a string, it is ``Host.name``
+ task_result._host = self._inventory.get_host(to_text(task_result._host))
+
+ if isinstance(task_result._task, string_types):
+ # If the value is a string, it is ``Task._uuid``
+ queue_cache_entry = (task_result._host.name, task_result._task)
+ try:
+ found_task = self._queued_task_cache[queue_cache_entry]['task']
+ except KeyError:
+ # This should only happen for an implicit task created by the
+ # TaskExecutor; restrict this behavior to the explicit use case
+ # of an implicit async_status task
+ if task_result._task_fields.get('action') != 'async_status':
+ raise
+ original_task = Task()
+ else:
+ original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
+ original_task._parent = found_task._parent
+ original_task.from_attrs(task_result._task_fields)
+ task_result._task = original_task
+
+ return task_result
+
+ @debug_closure
+ def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
+ '''
+ Reads results off the final queue and takes appropriate action
+ based on the result (executing callbacks, updating state, etc.).
+ '''
+
+ ret_results = []
+ handler_templar = Templar(self._loader)
+
+ def search_handler_blocks_by_name(handler_name, handler_blocks):
+ # iterate in reversed order since last handler loaded with the same name wins
+ for handler_block in reversed(handler_blocks):
+ for handler_task in handler_block.block:
+ if handler_task.name:
+ try:
+ if not handler_task.cached_name:
+ if handler_templar.is_template(handler_task.name):
+ handler_templar.available_variables = self._variable_manager.get_vars(play=iterator._play,
+ task=handler_task,
+ _hosts=self._hosts_cache,
+ _hosts_all=self._hosts_cache_all)
+ handler_task.name = handler_templar.template(handler_task.name)
+ handler_task.cached_name = True
+
+ # first we check with the full result of get_name(), which may
+ # include the role name (if the handler is from a role). If that
+ # is not found, we resort to the simple name field, which doesn't
+ # have anything extra added to it.
+ candidates = (
+ handler_task.name,
+ handler_task.get_name(include_role_fqcn=False),
+ handler_task.get_name(include_role_fqcn=True),
+ )
+
+ if handler_name in candidates:
+ return handler_task
+ except (UndefinedError, AnsibleUndefinedVariable) as e:
+ # We skip this handler due to the fact that it may be using
+ # a variable in the name that was conditionally included via
+ # set_fact or some other method, and we don't want to error
+ # out unnecessarily
+ if not handler_task.listen:
+ display.warning(
+ "Handler '%s' is unusable because it has no listen topics and "
+ "the name could not be templated (host-specific variables are "
+ "not supported in handler names). The error: %s" % (handler_task.name, to_text(e))
+ )
+ continue
+
+ cur_pass = 0
+ while True:
+ try:
+ self._results_lock.acquire()
+ task_result = self._results.popleft()
+ except IndexError:
+ break
+ finally:
+ self._results_lock.release()
+
+ original_host = task_result._host
+ original_task = task_result._task
+
+ # all host status messages contain 2 entries: (msg, task_result)
+ role_ran = False
+ if task_result.is_failed():
+ role_ran = True
+ ignore_errors = original_task.ignore_errors
+ if not ignore_errors:
+ # save the current state before failing it for later inspection
+ state_when_failed = iterator.get_state_for_host(original_host.name)
+ display.debug("marking %s as failed" % original_host.name)
+ if original_task.run_once:
+ # if we're using run_once, we have to fail every host here
+ for h in self._inventory.get_hosts(iterator._play.hosts):
+ if h.name not in self._tqm._unreachable_hosts:
+ iterator.mark_host_failed(h)
+ else:
+ iterator.mark_host_failed(original_host)
+
+ state, _ = iterator.get_next_task_for_host(original_host, peek=True)
+
+ if iterator.is_failed(original_host) and state and state.run_state == IteratingStates.COMPLETE:
+ self._tqm._failed_hosts[original_host.name] = True
+
+ # if we're iterating on the rescue portion of a block then
+ # we save the failed task in a special var for use
+ # within the rescue/always
+ if iterator.is_any_block_rescuing(state_when_failed):
+ self._tqm._stats.increment('rescued', original_host.name)
+ iterator._play._removed_hosts.remove(original_host.name)
+ self._variable_manager.set_nonpersistent_facts(
+ original_host.name,
+ dict(
+ ansible_failed_task=wrap_var(original_task.serialize()),
+ ansible_failed_result=task_result._result,
+ ),
+ )
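+                        # These variables enable constructs like the following inside
+                        # a rescue/always section (illustrative, not executed here):
+                        #   - debug:
+                        #       msg: "{{ ansible_failed_task.name }}: {{ ansible_failed_result.msg | default('') }}"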
+ else:
+ self._tqm._stats.increment('failures', original_host.name)
+ else:
+ self._tqm._stats.increment('ok', original_host.name)
+ self._tqm._stats.increment('ignored', original_host.name)
+ if 'changed' in task_result._result and task_result._result['changed']:
+ self._tqm._stats.increment('changed', original_host.name)
+ self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
+ elif task_result.is_unreachable():
+ ignore_unreachable = original_task.ignore_unreachable
+ if not ignore_unreachable:
+ self._tqm._unreachable_hosts[original_host.name] = True
+ iterator._play._removed_hosts.append(original_host.name)
+ self._tqm._stats.increment('dark', original_host.name)
+ else:
+ self._tqm._stats.increment('ok', original_host.name)
+ self._tqm._stats.increment('ignored', original_host.name)
+ self._tqm.send_callback('v2_runner_on_unreachable', task_result)
+ elif task_result.is_skipped():
+ self._tqm._stats.increment('skipped', original_host.name)
+ self._tqm.send_callback('v2_runner_on_skipped', task_result)
+ else:
+ role_ran = True
+
+ if original_task.loop:
+ # this task had a loop, and has more than one result, so
+ # loop over all of them instead of a single result
+ result_items = task_result._result.get('results', [])
+ else:
+ result_items = [task_result._result]
+
+ for result_item in result_items:
+ if '_ansible_notify' in result_item:
+ if task_result.is_changed():
+ # The shared dictionary for notified handlers is a proxy, which
+ # does not detect when sub-objects within the proxy are modified.
+ # So, per the docs, we reassign the list so the proxy picks up and
+ # notifies all other threads
+ for handler_name in result_item['_ansible_notify']:
+ found = False
+ # Find the handler using the above helper. First we look up the
+ # dependency chain of the current task (if it's from a role), otherwise
+ # we just look through the list of handlers in the current play/all
+ # roles and use the first one that matches the notify name
+ target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
+ if target_handler is not None:
+ found = True
+ if target_handler.notify_host(original_host):
+ self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
+
+ for listening_handler_block in iterator._play.handlers:
+ for listening_handler in listening_handler_block.block:
+ listeners = getattr(listening_handler, 'listen', []) or []
+ if not listeners:
+ continue
+
+ listeners = listening_handler.get_validated_value(
+ 'listen', listening_handler.fattributes.get('listen'), listeners, handler_templar
+ )
+ if handler_name not in listeners:
+ continue
+ else:
+ found = True
+
+ if listening_handler.notify_host(original_host):
+ self._tqm.send_callback('v2_playbook_on_notify', listening_handler, original_host)
+
+ # and if none were found, then we raise an error
+ if not found:
+                                    msg = ("The requested handler '%s' was not found in either the main handlers list or the listening "
+                                           "handlers list" % handler_name)
+ if C.ERROR_ON_MISSING_HANDLER:
+ raise AnsibleError(msg)
+ else:
+ display.warning(msg)
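+                                    # Which branch runs is governed by the error_on_missing_handler
+                                    # setting (presumably the ansible.cfg key / the
+                                    # ANSIBLE_ERROR_ON_MISSING_HANDLER env var backing
+                                    # C.ERROR_ON_MISSING_HANDLER).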
+
+ if 'add_host' in result_item:
+ # this task added a new host (add_host module)
+ new_host_info = result_item.get('add_host', dict())
+ self._inventory.add_dynamic_host(new_host_info, result_item)
+ # ensure host is available for subsequent plays
+ if result_item.get('changed') and new_host_info['host_name'] not in self._hosts_cache_all:
+ self._hosts_cache_all.append(new_host_info['host_name'])
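+                    # Illustrative shape of the payload this branch consumes, e.g.
+                    # from an add_host task (keys other than 'host_name' elided):
+                    #   result_item['add_host'] == {'host_name': 'newhost', ...}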
+
+ elif 'add_group' in result_item:
+ # this task added a new group (group_by module)
+ self._inventory.add_dynamic_group(original_host, result_item)
+
+ if 'add_host' in result_item or 'add_group' in result_item:
+ item_vars = _get_item_vars(result_item, original_task)
+ found_task_vars = self._queued_task_cache.get((original_host.name, task_result._task._uuid))['task_vars']
+ if item_vars:
+ all_task_vars = combine_vars(found_task_vars, item_vars)
+ else:
+ all_task_vars = found_task_vars
+ all_task_vars[original_task.register] = wrap_var(result_item)
+ post_process_whens(result_item, original_task, handler_templar, all_task_vars)
+ if original_task.loop or original_task.loop_with:
+ new_item_result = TaskResult(
+ task_result._host,
+ task_result._task,
+ result_item,
+ task_result._task_fields,
+ )
+ self._tqm.send_callback('v2_runner_item_on_ok', new_item_result)
+ if result_item.get('changed', False):
+ task_result._result['changed'] = True
+ if result_item.get('failed', False):
+ task_result._result['failed'] = True
+
+ if 'ansible_facts' in result_item and original_task.action not in C._ACTION_DEBUG:
+ # if delegated fact and we are delegating facts, we need to change target host for them
+ if original_task.delegate_to is not None and original_task.delegate_facts:
+ host_list = self.get_delegated_hosts(result_item, original_task)
+ else:
+ # Set facts that should always be on the delegated hosts
+ self._set_always_delegated_facts(result_item, original_task)
+
+ host_list = self.get_task_hosts(iterator, original_host, original_task)
+
+ if original_task.action in C._ACTION_INCLUDE_VARS:
+ for (var_name, var_value) in result_item['ansible_facts'].items():
+                            # find the host we're actually referring to here, which may
+ # be a host that is not really in inventory at all
+ for target_host in host_list:
+ self._variable_manager.set_host_variable(target_host, var_name, var_value)
+ else:
+ cacheable = result_item.pop('_ansible_facts_cacheable', False)
+ for target_host in host_list:
+                            # 'set_fact' is a misnomer: 'cacheable: true' was meant to create an 'actual fact'.
+                            # To avoid precedence issues and confusion with set_fact's normal operation,
+                            # we set BOTH the fact and the nonpersistent fact (aka hostvar).
+                            # When the fact is retrieved from the cache in subsequent runs it has the
+                            # lower precedence, but within the playbook that set it the 'higher'
+                            # precedence value is kept.
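+                            # Illustrative task for this branch:
+                            #   - set_fact:
+                            #       app_port: 8080
+                            #       cacheable: true
+                            # which stores app_port both as a cached fact and as a hostvar.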
+ is_set_fact = original_task.action in C._ACTION_SET_FACT
+ if not is_set_fact or cacheable:
+ self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
+ if is_set_fact:
+ self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
+
+ if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
+
+ if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
+ host_list = self.get_task_hosts(iterator, original_host, original_task)
+ else:
+ host_list = [None]
+
+ data = result_item['ansible_stats']['data']
+ aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
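+                    # Illustrative payload from a set_stats-style task:
+                    #   result_item['ansible_stats'] == {'data': {'restarts': 1},
+                    #                                    'per_host': True,
+                    #                                    'aggregate': True}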
+ for myhost in host_list:
+ for k in data.keys():
+ if aggregate:
+ self._tqm._stats.update_custom_stats(k, data[k], myhost)
+ else:
+ self._tqm._stats.set_custom_stats(k, data[k], myhost)
+
+ if 'diff' in task_result._result:
+ if self._diff or getattr(original_task, 'diff', False):
+ self._tqm.send_callback('v2_on_file_diff', task_result)
+
+ if not isinstance(original_task, TaskInclude):
+ self._tqm._stats.increment('ok', original_host.name)
+ if 'changed' in task_result._result and task_result._result['changed']:
+ self._tqm._stats.increment('changed', original_host.name)
+
+ # finally, send the ok for this task
+ self._tqm.send_callback('v2_runner_on_ok', task_result)
+
+ # register final results
+ if original_task.register:
+
+ if not isidentifier(original_task.register):
+ raise AnsibleError("Invalid variable name in 'register' specified: '%s'" % original_task.register)
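+                # e.g. 'register: result' is accepted, while 'register: my-result'
+                # or 'register: 1st_result' would be rejected by the check above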
+
+ host_list = self.get_task_hosts(iterator, original_host, original_task)
+
+ clean_copy = strip_internal_keys(module_response_deepcopy(task_result._result))
+ if 'invocation' in clean_copy:
+ del clean_copy['invocation']
+
+ for target_host in host_list:
+ self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
+
+ self._pending_results -= 1
+ if original_host.name in self._blocked_hosts:
+ del self._blocked_hosts[original_host.name]
+
+ # If this is a role task, mark the parent role as being run (if
+ # the task was ok or failed, but not skipped or unreachable)
+ if original_task._role is not None and role_ran: # TODO: and original_task.action not in C._ACTION_INCLUDE_ROLE:?
+ # lookup the role in the ROLE_CACHE to make sure we're dealing
+ # with the correct object and mark it as executed
+ for (entry, role_obj) in iterator._play.ROLE_CACHE[original_task._role.get_name()].items():
+ if role_obj._uuid == original_task._role._uuid:
+ role_obj._had_task_run[original_host.name] = True
+
+ ret_results.append(task_result)
+
+ if isinstance(original_task, Handler):
+ for handler in (h for b in iterator._play.handlers for h in b.block if h._uuid == original_task._uuid):
+ handler.remove_host(original_host)
+
+ if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
+ break
+
+ cur_pass += 1
+
+ return ret_results
+
+ def _wait_on_pending_results(self, iterator):
+ '''
+ Wait for the shared counter to drop to zero, using a short sleep
+ between checks to ensure we don't spin lock
+ '''
+
+ ret_results = []
+
+ display.debug("waiting for pending results...")
+ while self._pending_results > 0 and not self._tqm._terminated:
+
+ if self._tqm.has_dead_workers():
+ raise AnsibleError("A worker was found in a dead state")
+
+ results = self._process_pending_results(iterator)
+ ret_results.extend(results)
+ if self._pending_results > 0:
+ time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
+
+ display.debug("no more pending results, returning what we have")
+
+ return ret_results
+
+ def _copy_included_file(self, included_file):
+ '''
+ A proven safe and performant way to create a copy of an included file
+ '''
+ ti_copy = included_file._task.copy(exclude_parent=True)
+ ti_copy._parent = included_file._task._parent
+
+ temp_vars = ti_copy.vars | included_file._vars
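+        # PEP 584 dict union: on key collisions the right-hand operand wins, so
+        # vars attached to the included file override the task's own vars here.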
+
+ ti_copy.vars = temp_vars
+
+ return ti_copy
+
+ def _load_included_file(self, included_file, iterator, is_handler=False):
+ '''
+ Loads an included YAML file of tasks, applying the optional set of variables.
+
+ Raises AnsibleError exception in case of a failure during including a file,
+ in such case the caller is responsible for marking the host(s) as failed
+ using PlayIterator.mark_host_failed().
+ '''
+ display.debug("loading included file: %s" % included_file._filename)
+ try:
+ data = self._loader.load_from_file(included_file._filename)
+ if data is None:
+ return []
+ elif not isinstance(data, list):
+ raise AnsibleError("included task files must contain a list of tasks")
+
+ ti_copy = self._copy_included_file(included_file)
+
+ block_list = load_list_of_blocks(
+ data,
+ play=iterator._play,
+ parent_block=ti_copy.build_parent_block(),
+ role=included_file._task._role,
+ use_handlers=is_handler,
+ loader=self._loader,
+ variable_manager=self._variable_manager,
+ )
+
+ # since we skip incrementing the stats when the task result is
+ # first processed, we do so now for each host in the list
+ for host in included_file._hosts:
+ self._tqm._stats.increment('ok', host.name)
+ except AnsibleParserError:
+ raise
+ except AnsibleError as e:
+ if isinstance(e, AnsibleFileNotFound):
+ reason = "Could not find or access '%s' on the Ansible Controller." % to_text(e.file_name)
+ else:
+ reason = to_text(e)
+
+ for r in included_file._results:
+ r._result['failed'] = True
+
+ for host in included_file._hosts:
+ tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
+ self._tqm._stats.increment('failures', host.name)
+ self._tqm.send_callback('v2_runner_on_failed', tr)
+ raise AnsibleError(reason) from e
+
+ # finally, send the callback and return the list of blocks loaded
+ self._tqm.send_callback('v2_playbook_on_include', included_file)
+ display.debug("done processing included file")
+ return block_list
+
+ def _take_step(self, task, host=None):
+
+ ret = False
+ msg = u'Perform task: %s ' % task
+ if host:
+ msg += u'on %s ' % host
+ msg += u'(N)o/(y)es/(c)ontinue: '
+ resp = display.prompt(msg)
+
+ if resp.lower() in ['y', 'yes']:
+ display.debug("User ran task")
+ ret = True
+ elif resp.lower() in ['c', 'continue']:
+ display.debug("User ran task and canceled step mode")
+ self._step = False
+ ret = True
+ else:
+ display.debug("User skipped task")
+
+ display.banner(msg)
+
+ return ret
+
+ def _cond_not_supported_warn(self, task_name):
+ display.warning("%s task does not support when conditional" % task_name)
+
+ def _execute_meta(self, task, play_context, iterator, target_host):
+
+ # meta tasks store their args in the _raw_params field of args,
+ # since they do not use k=v pairs, so get that
+ meta_action = task.args.get('_raw_params')
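+        # e.g. a play entry '- meta: flush_handlers' arrives here with
+        # task.args == {'_raw_params': 'flush_handlers'}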
+
+ def _evaluate_conditional(h):
+ all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task,
+ _hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
+ templar = Templar(loader=self._loader, variables=all_vars)
+ return task.evaluate_conditional(templar, all_vars)
+
+ skipped = False
+ msg = meta_action
+ skip_reason = '%s conditional evaluated to False' % meta_action
+ if isinstance(task, Handler):
+ self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
+ else:
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+
+ # These don't support "when" conditionals
+ if meta_action in ('noop', 'refresh_inventory', 'reset_connection') and task.when:
+ self._cond_not_supported_warn(meta_action)
+
+ if meta_action == 'noop':
+ msg = "noop"
+ elif meta_action == 'flush_handlers':
+ if _evaluate_conditional(target_host):
+ host_state = iterator.get_state_for_host(target_host.name)
+ if host_state.run_state == IteratingStates.HANDLERS:
+ raise AnsibleError('flush_handlers cannot be used as a handler')
+ if target_host.name not in self._tqm._unreachable_hosts:
+ host_state.pre_flushing_run_state = host_state.run_state
+ host_state.run_state = IteratingStates.HANDLERS
+ msg = "triggered running handlers for %s" % target_host.name
+ else:
+ skipped = True
+ skip_reason += ', not running handlers for %s' % target_host.name
+ elif meta_action == 'refresh_inventory':
+ self._inventory.refresh_inventory()
+ self._set_hosts_cache(iterator._play)
+ msg = "inventory successfully refreshed"
+ elif meta_action == 'clear_facts':
+ if _evaluate_conditional(target_host):
+ for host in self._inventory.get_hosts(iterator._play.hosts):
+ hostname = host.get_name()
+ self._variable_manager.clear_facts(hostname)
+ msg = "facts cleared"
+ else:
+ skipped = True
+ skip_reason += ', not clearing facts and fact cache for %s' % target_host.name
+ elif meta_action == 'clear_host_errors':
+ if _evaluate_conditional(target_host):
+ for host in self._inventory.get_hosts(iterator._play.hosts):
+ self._tqm._failed_hosts.pop(host.name, False)
+ self._tqm._unreachable_hosts.pop(host.name, False)
+ iterator.clear_host_errors(host)
+ msg = "cleared host errors"
+ else:
+ skipped = True
+ skip_reason += ', not clearing host error state for %s' % target_host.name
+ elif meta_action == 'end_batch':
+ if _evaluate_conditional(target_host):
+ for host in self._inventory.get_hosts(iterator._play.hosts):
+ if host.name not in self._tqm._unreachable_hosts:
+ iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
+ msg = "ending batch"
+ else:
+ skipped = True
+ skip_reason += ', continuing current batch'
+ elif meta_action == 'end_play':
+ if _evaluate_conditional(target_host):
+ for host in self._inventory.get_hosts(iterator._play.hosts):
+ if host.name not in self._tqm._unreachable_hosts:
+ iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
+ # end_play is used in PlaybookExecutor/TQM to indicate that
+ # the whole play is supposed to be ended as opposed to just a batch
+ iterator.end_play = True
+ msg = "ending play"
+ else:
+ skipped = True
+ skip_reason += ', continuing play'
+ elif meta_action == 'end_host':
+ if _evaluate_conditional(target_host):
+ iterator.set_run_state_for_host(target_host.name, IteratingStates.COMPLETE)
+ iterator._play._removed_hosts.append(target_host.name)
+ msg = "ending play for %s" % target_host.name
+ else:
+ skipped = True
+ skip_reason += ", continuing execution for %s" % target_host.name
+ # TODO: Nix msg here? Left for historical reasons, but skip_reason exists now.
+ msg = "end_host conditional evaluated to false, continuing execution for %s" % target_host.name
+ elif meta_action == 'role_complete':
+ # Allow users to use this in a play as reported in https://github.com/ansible/ansible/issues/22286?
+ # How would this work with allow_duplicates??
+ if task.implicit:
+ if target_host.name in task._role._had_task_run:
+ task._role._completed[target_host.name] = True
+ msg = 'role_complete for %s' % target_host.name
+ elif meta_action == 'reset_connection':
+ all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task,
+ _hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
+ templar = Templar(loader=self._loader, variables=all_vars)
+
+ # apply the given task's information to the connection info,
+ # which may override some fields already set by the play or
+ # the options specified on the command line
+ play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)
+
+ # fields set from the play/task may be based on variables, so we have to
+ # do the same kind of post validation step on it here before we use it.
+ play_context.post_validate(templar=templar)
+
+ # now that the play context is finalized, if the remote_addr is not set
+ # default to using the host's address field as the remote address
+ if not play_context.remote_addr:
+ play_context.remote_addr = target_host.address
+
+ # We also add "magic" variables back into the variables dict to make sure
+        # a certain subset of variables exist. This 'mostly' works here because meta
+        # tasks disregard the loop, but this should not really use play_context at all
+ play_context.update_vars(all_vars)
+
+ if target_host in self._active_connections:
+ connection = Connection(self._active_connections[target_host])
+ del self._active_connections[target_host]
+ else:
+ connection = plugin_loader.connection_loader.get(play_context.connection, play_context, os.devnull)
+ connection.set_options(task_keys=task.dump_attrs(), var_options=all_vars)
+ play_context.set_attributes_from_plugin(connection)
+
+ if connection:
+ try:
+ connection.reset()
+ msg = 'reset connection'
+ except ConnectionError as e:
+ # most likely socket is already closed
+ display.debug("got an error while closing persistent connection: %s" % e)
+ else:
+ msg = 'no connection, nothing to reset'
+ else:
+ raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
+
+ result = {'msg': msg}
+ if skipped:
+ result['skipped'] = True
+ result['skip_reason'] = skip_reason
+ else:
+ result['changed'] = False
+
+ if not task.implicit:
+ header = skip_reason if skipped else msg
+ display.vv(f"META: {header}")
+
+ if isinstance(task, Handler):
+ task.remove_host(target_host)
+
+ res = TaskResult(target_host, task, result)
+ if skipped:
+ self._tqm.send_callback('v2_runner_on_skipped', res)
+ return [res]
+
+ def get_hosts_left(self, iterator):
+ ''' returns list of available hosts for this iterator by filtering out unreachables '''
+
+ hosts_left = []
+ for host in self._hosts_cache:
+ if host not in self._tqm._unreachable_hosts:
+ try:
+ hosts_left.append(self._inventory.hosts[host])
+ except KeyError:
+ hosts_left.append(self._inventory.get_host(host))
+ return hosts_left
+
+ def update_active_connections(self, results):
+ ''' updates the current active persistent connections '''
+ for r in results:
+ if 'args' in r._task_fields:
+ socket_path = r._task_fields['args'].get('_ansible_socket')
+ if socket_path:
+ if r._host not in self._active_connections:
+ self._active_connections[r._host] = socket_path
+
+
+class NextAction(object):
+ """ The next action after an interpreter's exit. """
+ REDO = 1
+ CONTINUE = 2
+ EXIT = 3
+
+ def __init__(self, result=EXIT):
+ self.result = result
+
+
+class Debugger(cmd.Cmd):
+ prompt_continuous = '> ' # multiple lines
+
+ def __init__(self, task, host, task_vars, play_context, result, next_action):
+        # cmd.Cmd is an old-style class
+ cmd.Cmd.__init__(self)
+
+ self.prompt = '[%s] %s (debug)> ' % (host, task)
+ self.intro = None
+ self.scope = {}
+ self.scope['task'] = task
+ self.scope['task_vars'] = task_vars
+ self.scope['host'] = host
+ self.scope['play_context'] = play_context
+ self.scope['result'] = result
+ self.next_action = next_action
+
+ def cmdloop(self):
+ try:
+ cmd.Cmd.cmdloop(self)
+ except KeyboardInterrupt:
+ pass
+
+ do_h = cmd.Cmd.do_help
+
+ def do_EOF(self, args):
+ """Quit"""
+ return self.do_quit(args)
+
+ def do_quit(self, args):
+ """Quit"""
+ display.display('User interrupted execution')
+ self.next_action.result = NextAction.EXIT
+ return True
+
+ do_q = do_quit
+
+ def do_continue(self, args):
+ """Continue to next result"""
+ self.next_action.result = NextAction.CONTINUE
+ return True
+
+ do_c = do_continue
+
+ def do_redo(self, args):
+ """Schedule task for re-execution. The re-execution may not be the next result"""
+ self.next_action.result = NextAction.REDO
+ return True
+
+ do_r = do_redo
+
+ def do_update_task(self, args):
+ """Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
+ templar = Templar(None, variables=self.scope['task_vars'])
+ task = self.scope['task']
+ task = task.load_data(task._ds)
+ task.post_validate(templar)
+ self.scope['task'] = task
+
+ do_u = do_update_task
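+
+    # Illustrative debugger session (prompt interaction, not executed here):
+    #   [host1] TASK: install pkg (debug)> p task.args
+    #   {'name': '{{ pkg_name }}'}
+    #   [host1] TASK: install pkg (debug)> task_vars['pkg_name'] = 'nginx'
+    #   [host1] TASK: install pkg (debug)> u
+    #   [host1] TASK: install pkg (debug)> r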
+
+ def evaluate(self, args):
+ try:
+ return eval(args, globals(), self.scope)
+ except Exception:
+ t, v = sys.exc_info()[:2]
+ if isinstance(t, str):
+ exc_type_name = t
+ else:
+ exc_type_name = t.__name__
+ display.display('***%s:%s' % (exc_type_name, repr(v)))
+ raise
+
+ def do_pprint(self, args):
+ """Pretty Print"""
+ try:
+ result = self.evaluate(args)
+ display.display(pprint.pformat(result))
+ except Exception:
+ pass
+
+ do_p = do_pprint
+
+ def execute(self, args):
+ try:
+ code = compile(args + '\n', '<stdin>', 'single')
+ exec(code, globals(), self.scope)
+ except Exception:
+ t, v = sys.exc_info()[:2]
+ if isinstance(t, str):
+ exc_type_name = t
+ else:
+ exc_type_name = t.__name__
+ display.display('***%s:%s' % (exc_type_name, repr(v)))
+ raise
+
+ def default(self, line):
+ try:
+ self.execute(line)
+ except Exception:
+ pass
diff --git a/lib/ansible/plugins/strategy/debug.py b/lib/ansible/plugins/strategy/debug.py
new file mode 100644
index 0000000..f808bcf
--- /dev/null
+++ b/lib/ansible/plugins/strategy/debug.py
@@ -0,0 +1,37 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: debug
+ short_description: Executes tasks in interactive debug session.
+ description:
+ - Task execution is 'linear' but controlled by an interactive debug session.
+ version_added: "2.1"
+ author: Kishin Yagami (!UNKNOWN)
+'''
+
+import cmd
+import pprint
+import sys
+
+from ansible.plugins.strategy.linear import StrategyModule as LinearStrategyModule
+
+
+class StrategyModule(LinearStrategyModule):
+ def __init__(self, tqm):
+ super(StrategyModule, self).__init__(tqm)
+ self.debugger_active = True
diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py
new file mode 100644
index 0000000..6f45114
--- /dev/null
+++ b/lib/ansible/plugins/strategy/free.py
@@ -0,0 +1,303 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: free
+ short_description: Executes tasks without waiting for all hosts
+ description:
+ - Task execution is as fast as possible per batch as defined by C(serial) (default all).
+ Ansible will not wait for other hosts to finish the current task before queuing more tasks for other hosts.
+ All hosts are still attempted for the current task, but it prevents blocking new tasks for hosts that have already finished.
+ - With the free strategy, unlike the default linear strategy, a host that is slow or stuck on a specific task
+ won't hold up the rest of the hosts and tasks.
+ version_added: "2.0"
+ author: Ansible Core Team
+'''
+
+import time
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.playbook.handler import Handler
+from ansible.playbook.included_file import IncludedFile
+from ansible.plugins.loader import action_loader
+from ansible.plugins.strategy import StrategyBase
+from ansible.template import Templar
+from ansible.module_utils._text import to_text
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class StrategyModule(StrategyBase):
+
+ # This strategy manages throttling on its own, so we don't want it done in queue_task
+ ALLOW_BASE_THROTTLING = False
+
+ def __init__(self, tqm):
+ super(StrategyModule, self).__init__(tqm)
+ self._host_pinned = False
+
+ def run(self, iterator, play_context):
+ '''
+ The "free" strategy is a bit more complex, in that it allows tasks to
+ be sent to hosts as quickly as they can be processed. This means that
+        some hosts may finish very quickly if their tasks require little or no
+        work compared to other hosts.
+
+ The algorithm used here also tries to be more "fair" when iterating
+ through hosts by remembering the last host in the list to be given a task
+ and starting the search from there as opposed to the top of the hosts
+ list again, which would end up favoring hosts near the beginning of the
+ list.
+ '''
+
+ # the last host to be given a task
+ last_host = 0
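+        # Illustrative fairness: with hosts [A, B, C], if the previous scan
+        # stopped at index 1 (B), the next scan resumes there instead of always
+        # restarting at A, so hosts early in the list are not favored.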
+
+ result = self._tqm.RUN_OK
+
+ # start with all workers being counted as being free
+ workers_free = len(self._workers)
+
+ self._set_hosts_cache(iterator._play)
+
+ if iterator._play.max_fail_percentage is not None:
+ display.warning("Using max_fail_percentage with the free strategy is not supported, as tasks are executed independently on each host")
+
+ work_to_do = True
+ while work_to_do and not self._tqm._terminated:
+
+ hosts_left = self.get_hosts_left(iterator)
+
+ if len(hosts_left) == 0:
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ result = False
+ break
+
+ work_to_do = False # assume we have no more work to do
+ starting_host = last_host # save current position so we know when we've looped back around and need to break
+
+ # try and find an unblocked host with a task to run
+ host_results = []
+ while True:
+ host = hosts_left[last_host]
+ display.debug("next free host: %s" % host)
+ host_name = host.get_name()
+
+ # peek at the next task for the host, to see if there's
+                # anything to do for this host
+ (state, task) = iterator.get_next_task_for_host(host, peek=True)
+ display.debug("free host state: %s" % state, host=host_name)
+ display.debug("free host task: %s" % task, host=host_name)
+
+                # check if there is work to do: either there is a task, or the host is
+                # still blocked, which could mean it is processing an include task and
+                # more tasks may be queued once its result is processed
+ if (task or self._blocked_hosts.get(host_name, False)) and not self._tqm._unreachable_hosts.get(host_name, False):
+ display.debug("this host has work to do", host=host_name)
+ # set the flag so the outer loop knows we've still found
+ # some work which needs to be done
+ work_to_do = True
+
+ if not self._tqm._unreachable_hosts.get(host_name, False) and task:
+ # check to see if this host is blocked (still executing a previous task)
+ if not self._blocked_hosts.get(host_name, False):
+ display.debug("getting variables", host=host_name)
+ task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
+ _hosts=self._hosts_cache,
+ _hosts_all=self._hosts_cache_all)
+ self.add_tqm_variables(task_vars, play=iterator._play)
+ templar = Templar(loader=self._loader, variables=task_vars)
+ display.debug("done getting variables", host=host_name)
+
+ try:
+ throttle = int(templar.template(task.throttle))
+ except Exception as e:
+ raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
+
+ if throttle > 0:
+ same_tasks = 0
+ for worker in self._workers:
+ if worker and worker.is_alive() and worker._task._uuid == task._uuid:
+ same_tasks += 1
+
+ display.debug("task: %s, same_tasks: %d" % (task.get_name(), same_tasks))
+ if same_tasks >= throttle:
+ break
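+                            # Illustrative effect: with 'throttle: 2' on a task, once
+                            # two workers are already running that task the scan loop
+                            # breaks out and this host is retried on a later pass.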
+
+ # advance the host, mark the host blocked, and queue it
+ self._blocked_hosts[host_name] = True
+ iterator.set_state_for_host(host.name, state)
+
+ try:
+ action = action_loader.get(task.action, class_only=True, collection_list=task.collections)
+ except KeyError:
+ # we don't care here, because the action may simply not have a
+ # corresponding action plugin
+ action = None
+
+ try:
+ task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
+ display.debug("done templating", host=host_name)
+ except Exception:
+ # just ignore any errors during task name templating,
+ # we don't care if it just shows the raw name
+ display.debug("templating failed for some reason", host=host_name)
+
+ run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
+ if run_once:
+ if action and getattr(action, 'BYPASS_HOST_LOOP', False):
+ raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy "
+ "and would instead execute for every host in the inventory list." % task.action, obj=task._ds)
+ else:
+ display.warning("Using run_once with the free strategy is not currently supported. This task will still be "
+ "executed for every host in the inventory list.")
+
+ # check to see if this task should be skipped, due to it being a member of a
+ # role which has already run (and whether that role allows duplicate execution)
+ if not isinstance(task, Handler) and task._role and task._role.has_run(host):
+ # If there is no metadata, the default behavior is to not allow duplicates,
+ # if there is metadata, check to see if the allow_duplicates flag was set to true
+ if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
+ display.debug("'%s' skipped because role has already run" % task, host=host_name)
+ del self._blocked_hosts[host_name]
+ continue
+
+ if task.action in C._ACTION_META:
+ self._execute_meta(task, play_context, iterator, target_host=host)
+ self._blocked_hosts[host_name] = False
+ else:
+ # handle step if needed, skip meta actions as they are used internally
+ if not self._step or self._take_step(task, host_name):
+ if task.any_errors_fatal:
+ display.warning("Using any_errors_fatal with the free strategy is not supported, "
+ "as tasks are executed independently on each host")
+ if isinstance(task, Handler):
+ self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
+ else:
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+ self._queue_task(host, task, task_vars, play_context)
+ # each task is counted as a worker being busy
+ workers_free -= 1
+ del task_vars
+ else:
+ display.debug("%s is blocked, skipping for now" % host_name)
+
+ # all workers have tasks to do (and the current host isn't done with the play).
+ # loop back to starting host and break out
+ if self._host_pinned and workers_free == 0 and work_to_do:
+ last_host = starting_host
+ break
+
+ # move on to the next host and make sure we
+ # haven't gone past the end of our hosts list
+ last_host += 1
+ if last_host > len(hosts_left) - 1:
+ last_host = 0
+
+ # if we've looped around back to the start, break out
+ if last_host == starting_host:
+ break
+
+ results = self._process_pending_results(iterator)
+ host_results.extend(results)
+
+ # each result is counted as a worker being free again
+ workers_free += len(results)
+
+ self.update_active_connections(results)
+
+ included_files = IncludedFile.process_include_results(
+ host_results,
+ iterator=iterator,
+ loader=self._loader,
+ variable_manager=self._variable_manager
+ )
+
+ if len(included_files) > 0:
+ all_blocks = dict((host, []) for host in hosts_left)
+ failed_includes_hosts = set()
+ for included_file in included_files:
+ display.debug("collecting new blocks for %s" % included_file)
+ is_handler = False
+ try:
+ if included_file._is_role:
+ new_ir = self._copy_included_file(included_file)
+
+ new_blocks, handler_blocks = new_ir.get_block_list(
+ play=iterator._play,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ )
+ else:
+ is_handler = isinstance(included_file._task, Handler)
+ new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=is_handler)
+
+ # let PlayIterator know about any new handlers included via include_role or
+                            # import_role within include_role/include_tasks
+ iterator.handlers = [h for b in iterator._play.handlers for h in b.block]
+ except AnsibleParserError:
+ raise
+ except AnsibleError as e:
+ if included_file._is_role:
+ # include_role does not have on_include callback so display the error
+ display.error(to_text(e), wrap_text=False)
+ for r in included_file._results:
+ r._result['failed'] = True
+ failed_includes_hosts.add(r._host)
+ continue
+
+ for new_block in new_blocks:
+ if is_handler:
+ for task in new_block.block:
+ task.notified_hosts = included_file._hosts[:]
+ final_block = new_block
+ else:
+ task_vars = self._variable_manager.get_vars(
+ play=iterator._play,
+ task=new_block.get_first_parent_include(),
+ _hosts=self._hosts_cache,
+ _hosts_all=self._hosts_cache_all,
+ )
+ final_block = new_block.filter_tagged_tasks(task_vars)
+ for host in hosts_left:
+ if host in included_file._hosts:
+ all_blocks[host].append(final_block)
+ display.debug("done collecting new blocks for %s" % included_file)
+
+ for host in failed_includes_hosts:
+ self._tqm._failed_hosts[host.name] = True
+ iterator.mark_host_failed(host)
+
+ display.debug("adding all collected blocks from %d included file(s) to iterator" % len(included_files))
+ for host in hosts_left:
+ iterator.add_tasks(host, all_blocks[host])
+ display.debug("done adding collected blocks to iterator")
+
+ # pause briefly so we don't spin lock
+ time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
+
+ # collect all the final results
+ results = self._wait_on_pending_results(iterator)
+
+ # run the base class run() method, which executes the cleanup function
+ # and runs any outstanding handlers which have been triggered
+ return super(StrategyModule, self).run(iterator, play_context, result)
diff --git a/lib/ansible/plugins/strategy/host_pinned.py b/lib/ansible/plugins/strategy/host_pinned.py
new file mode 100644
index 0000000..70f22eb
--- /dev/null
+++ b/lib/ansible/plugins/strategy/host_pinned.py
@@ -0,0 +1,45 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: host_pinned
+ short_description: Executes tasks on each host without interruption
+ description:
+ - Task execution is as fast as possible per host in batch as defined by C(serial) (default all).
+ Ansible will not start a play for a host unless the play can be finished without interruption by tasks for another host,
+ i.e. the number of hosts with an active play does not exceed the number of forks.
+ Ansible will not wait for other hosts to finish the current task before queuing the next task for a host that has finished.
+      Once a host is done with the play, it opens its slot to a new host that was waiting to start.
+ Other than that, it behaves just like the "free" strategy.
+ version_added: "2.7"
+ author: Ansible Core Team
+'''
+
+from ansible.plugins.strategy.free import StrategyModule as FreeStrategyModule
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class StrategyModule(FreeStrategyModule):
+
+ def __init__(self, tqm):
+ super(StrategyModule, self).__init__(tqm)
+ self._host_pinned = True
diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py
new file mode 100644
index 0000000..a3c91c2
--- /dev/null
+++ b/lib/ansible/plugins/strategy/linear.py
@@ -0,0 +1,406 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: linear
+ short_description: Executes tasks in a linear fashion
+ description:
+ - Task execution is in lockstep per host batch as defined by C(serial) (default all).
+ Up to the fork limit of hosts will execute each task at the same time and then
+ the next series of hosts until the batch is done, before going on to the next task.
+ version_added: "2.0"
+ notes:
+ - This was the default Ansible behaviour before 'strategy plugins' were introduced in 2.0.
+ author: Ansible Core Team
+'''
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleAssertionError, AnsibleParserError
+from ansible.executor.play_iterator import IteratingStates, FailedStates
+from ansible.module_utils._text import to_text
+from ansible.playbook.handler import Handler
+from ansible.playbook.included_file import IncludedFile
+from ansible.playbook.task import Task
+from ansible.plugins.loader import action_loader
+from ansible.plugins.strategy import StrategyBase
+from ansible.template import Templar
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class StrategyModule(StrategyBase):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # used for the lockstep to indicate to run handlers
+ self._in_handlers = False
+
+ def _get_next_task_lockstep(self, hosts, iterator):
+ '''
+ Returns a list of (host, task) tuples, where the task may
+ be a noop task to keep the iterator in lock step across
+ all hosts.
+ '''
+ noop_task = Task()
+ noop_task.action = 'meta'
+ noop_task.args['_raw_params'] = 'noop'
+ noop_task.implicit = True
+ noop_task.set_loader(iterator._play._loader)
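+        # Illustrative lockstep: if host A's next task is task 7 while host B has
+        # already passed it (e.g. B skipped that block), B is handed this noop
+        # meta task so both hosts stay aligned on the same task index.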
+
+ state_task_per_host = {}
+ for host in hosts:
+ state, task = iterator.get_next_task_for_host(host, peek=True)
+ if task is not None:
+ state_task_per_host[host] = state, task
+
+ if not state_task_per_host:
+ return [(h, None) for h in hosts]
+
+ if self._in_handlers and not any(filter(
+ lambda rs: rs == IteratingStates.HANDLERS,
+ (s.run_state for s, _ in state_task_per_host.values()))
+ ):
+ self._in_handlers = False
+
+ if self._in_handlers:
+ lowest_cur_handler = min(
+ s.cur_handlers_task for s, t in state_task_per_host.values()
+ if s.run_state == IteratingStates.HANDLERS
+ )
+ else:
+ task_uuids = [t._uuid for s, t in state_task_per_host.values()]
+ _loop_cnt = 0
+ while _loop_cnt <= 1:
+ try:
+ cur_task = iterator.all_tasks[iterator.cur_task]
+ except IndexError:
+ # pick up any tasks left after clear_host_errors
+ iterator.cur_task = 0
+ _loop_cnt += 1
+ else:
+ iterator.cur_task += 1
+ if cur_task._uuid in task_uuids:
+ break
+ else:
+ # prevent infinite loop
+ raise AnsibleAssertionError(
+ 'BUG: There seems to be a mismatch between tasks in PlayIterator and HostStates.'
+ )
+
+ host_tasks = []
+ for host, (state, task) in state_task_per_host.items():
+ if ((self._in_handlers and lowest_cur_handler == state.cur_handlers_task) or
+ (not self._in_handlers and cur_task._uuid == task._uuid)):
+ iterator.set_state_for_host(host.name, state)
+ host_tasks.append((host, task))
+ else:
+ host_tasks.append((host, noop_task))
+
+ # once hosts synchronize on 'flush_handlers' lockstep enters
+ # '_in_handlers' phase where handlers are run instead of tasks
+ # until at least one host is in IteratingStates.HANDLERS
+ if (not self._in_handlers and cur_task.action in C._ACTION_META and
+ cur_task.args.get('_raw_params') == 'flush_handlers'):
+ self._in_handlers = True
+
+ return host_tasks
+
+ def run(self, iterator, play_context):
+ '''
+ The linear strategy is simple - get the next task and queue
+ it for all hosts, then wait for the queue to drain before
+ moving on to the next task
+ '''
+
+ # iterate over each task, while there is one left to run
+ result = self._tqm.RUN_OK
+ work_to_do = True
+
+ self._set_hosts_cache(iterator._play)
+
+ while work_to_do and not self._tqm._terminated:
+
+ try:
+ display.debug("getting the remaining hosts for this loop")
+ hosts_left = self.get_hosts_left(iterator)
+ display.debug("done getting the remaining hosts for this loop")
+
+ # queue up this task for each host in the inventory
+ callback_sent = False
+ work_to_do = False
+
+ host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
+
+ # skip control
+ skip_rest = False
+ choose_step = True
+
+ # flag set if task is set to any_errors_fatal
+ any_errors_fatal = False
+
+ results = []
+ for (host, task) in host_tasks:
+ if not task:
+ continue
+
+ if self._tqm._terminated:
+ break
+
+ run_once = False
+ work_to_do = True
+
+ # check to see if this task should be skipped, due to it being a member of a
+ # role which has already run (and whether that role allows duplicate execution)
+ if not isinstance(task, Handler) and task._role and task._role.has_run(host):
+ # If there is no metadata, the default behavior is to not allow duplicates,
+ # if there is metadata, check to see if the allow_duplicates flag was set to true
+ if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
+ display.debug("'%s' skipped because role has already run" % task)
+ continue
+
+ display.debug("getting variables")
+ task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
+ _hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
+ self.add_tqm_variables(task_vars, play=iterator._play)
+ templar = Templar(loader=self._loader, variables=task_vars)
+ display.debug("done getting variables")
+
+ # test to see if the task across all hosts points to an action plugin which
+ # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
+ # will only send this task to the first host in the list.
+
+ task_action = templar.template(task.action)
+
+ try:
+ action = action_loader.get(task_action, class_only=True, collection_list=task.collections)
+ except KeyError:
+ # we don't care here, because the action may simply not have a
+ # corresponding action plugin
+ action = None
+
+ if task_action in C._ACTION_META:
+ # for the linear strategy, we run meta tasks just once and for
+ # all hosts currently being iterated over rather than one host
+ results.extend(self._execute_meta(task, play_context, iterator, host))
+ if task.args.get('_raw_params', None) not in ('noop', 'reset_connection', 'end_host', 'role_complete', 'flush_handlers'):
+ run_once = True
+ if (task.any_errors_fatal or run_once) and not task.ignore_errors:
+ any_errors_fatal = True
+ else:
+ # handle step if needed, skip meta actions as they are used internally
+ if self._step and choose_step:
+ if self._take_step(task):
+ choose_step = False
+ else:
+ skip_rest = True
+ break
+
+ run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
+
+ if (task.any_errors_fatal or run_once) and not task.ignore_errors:
+ any_errors_fatal = True
+
+ if not callback_sent:
+ display.debug("sending task start callback, copying the task so we can template it temporarily")
+ saved_name = task.name
+ display.debug("done copying, going to template now")
+ try:
+ task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
+ display.debug("done templating")
+ except Exception:
+ # just ignore any errors during task name templating,
+ # we don't care if it just shows the raw name
+ display.debug("templating failed for some reason")
+ display.debug("here goes the callback...")
+ if isinstance(task, Handler):
+ self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
+ else:
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+ task.name = saved_name
+ callback_sent = True
+ display.debug("sending task start callback")
+
+ self._blocked_hosts[host.get_name()] = True
+ self._queue_task(host, task, task_vars, play_context)
+ del task_vars
+
+ # if we're bypassing the host loop, break out now
+ if run_once:
+ break
+
+ results.extend(self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1))))
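+                    # Drain a bounded number of results per queued task (roughly 10%
+                    # of the worker count, at least one pass) so queuing new tasks is
+                    # not starved by result processing.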
+
+ # go to next host/task group
+ if skip_rest:
+ continue
+
+ display.debug("done queuing things up, now waiting for results queue to drain")
+ if self._pending_results > 0:
+ results.extend(self._wait_on_pending_results(iterator))
+
+ self.update_active_connections(results)
+
+ included_files = IncludedFile.process_include_results(
+ results,
+ iterator=iterator,
+ loader=self._loader,
+ variable_manager=self._variable_manager
+ )
+
+ if len(included_files) > 0:
+ display.debug("we have included files to process")
+
+ display.debug("generating all_blocks data")
+ all_blocks = dict((host, []) for host in hosts_left)
+ display.debug("done generating all_blocks data")
+ included_tasks = []
+ failed_includes_hosts = set()
+ for included_file in included_files:
+ display.debug("processing included file: %s" % included_file._filename)
+ is_handler = False
+ try:
+ if included_file._is_role:
+ new_ir = self._copy_included_file(included_file)
+
+ new_blocks, handler_blocks = new_ir.get_block_list(
+ play=iterator._play,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ )
+ else:
+ is_handler = isinstance(included_file._task, Handler)
+ new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=is_handler)
+
+ # let PlayIterator know about any new handlers included via include_role or
+                            # import_role within include_role/include_tasks
+ iterator.handlers = [h for b in iterator._play.handlers for h in b.block]
+
+ display.debug("iterating over new_blocks loaded from include file")
+ for new_block in new_blocks:
+ if is_handler:
+ for task in new_block.block:
+ task.notified_hosts = included_file._hosts[:]
+ final_block = new_block
+ else:
+ task_vars = self._variable_manager.get_vars(
+ play=iterator._play,
+ task=new_block.get_first_parent_include(),
+ _hosts=self._hosts_cache,
+ _hosts_all=self._hosts_cache_all,
+ )
+ display.debug("filtering new block on tags")
+ final_block = new_block.filter_tagged_tasks(task_vars)
+ display.debug("done filtering new block on tags")
+
+ included_tasks.extend(final_block.get_tasks())
+
+ for host in hosts_left:
+ if host in included_file._hosts:
+ all_blocks[host].append(final_block)
+
+ display.debug("done iterating over new_blocks loaded from include file")
+ except AnsibleParserError:
+ raise
+ except AnsibleError as e:
+ if included_file._is_role:
+ # include_role does not have on_include callback so display the error
+ display.error(to_text(e), wrap_text=False)
+ for r in included_file._results:
+ r._result['failed'] = True
+ failed_includes_hosts.add(r._host)
+ continue
+
+ for host in failed_includes_hosts:
+ self._tqm._failed_hosts[host.name] = True
+ iterator.mark_host_failed(host)
+
+ # finally go through all of the hosts and append the
+ # accumulated blocks to their list of tasks
+ display.debug("extending task lists for all hosts with included blocks")
+
+ for host in hosts_left:
+ iterator.add_tasks(host, all_blocks[host])
+
+ iterator.all_tasks[iterator.cur_task:iterator.cur_task] = included_tasks
+
+ display.debug("done extending task lists")
+ display.debug("done processing included files")
+
+ display.debug("results queue empty")
+
+ display.debug("checking for any_errors_fatal")
+ failed_hosts = []
+ unreachable_hosts = []
+ for res in results:
+ # execute_meta() does not set 'failed' in the TaskResult
+ # so we skip checking it with the meta tasks and look just at the iterator
+ if (res.is_failed() or res._task.action in C._ACTION_META) and iterator.is_failed(res._host):
+ failed_hosts.append(res._host.name)
+ elif res.is_unreachable():
+ unreachable_hosts.append(res._host.name)
+
+ # if any_errors_fatal and we had an error, mark all hosts as failed
+ if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0):
+ dont_fail_states = frozenset([IteratingStates.RESCUE, IteratingStates.ALWAYS])
+ for host in hosts_left:
+ (s, _) = iterator.get_next_task_for_host(host, peek=True)
+ # the state may actually be in a child state, use the get_active_state()
+ # method in the iterator to figure out the true active state
+ s = iterator.get_active_state(s)
+ if s.run_state not in dont_fail_states or \
+ s.run_state == IteratingStates.RESCUE and s.fail_state & FailedStates.RESCUE != 0:
+ self._tqm._failed_hosts[host.name] = True
+ result |= self._tqm.RUN_FAILED_BREAK_PLAY
+ display.debug("done checking for any_errors_fatal")
+
+ display.debug("checking for max_fail_percentage")
+ if iterator._play.max_fail_percentage is not None and len(results) > 0:
+ percentage = iterator._play.max_fail_percentage / 100.0
+
+ if (len(self._tqm._failed_hosts) / iterator.batch_size) > percentage:
+ for host in hosts_left:
+ # don't double-mark hosts, or the iterator will potentially
+ # fail them out of the rescue/always states
+ if host.name not in failed_hosts:
+ self._tqm._failed_hosts[host.name] = True
+ iterator.mark_host_failed(host)
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ result |= self._tqm.RUN_FAILED_BREAK_PLAY
+                    display.debug('(%s failed / %s total) > %s max fail' % (len(self._tqm._failed_hosts), iterator.batch_size, percentage))
+ display.debug("done checking for max_fail_percentage")
+
+ display.debug("checking to see if all hosts have failed and the running result is not ok")
+ if result != self._tqm.RUN_OK and len(self._tqm._failed_hosts) >= len(hosts_left):
+ display.debug("^ not ok, so returning result now")
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ return result
+ display.debug("done checking to see if all hosts have failed")
+
+ except (IOError, EOFError) as e:
+ display.debug("got IOError/EOFError in task loop: %s" % e)
+ # most likely an abort, return failed
+ return self._tqm.RUN_UNKNOWN_ERROR
+
+ # run the base class run() method, which executes the cleanup function
+ # and runs any outstanding handlers which have been triggered
+
+ return super(StrategyModule, self).run(iterator, play_context, result)
diff --git a/lib/ansible/plugins/terminal/__init__.py b/lib/ansible/plugins/terminal/__init__.py
new file mode 100644
index 0000000..d464b07
--- /dev/null
+++ b/lib/ansible/plugins/terminal/__init__.py
@@ -0,0 +1,133 @@
+#
+# (c) 2016 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from abc import ABC, abstractmethod
+
+from ansible.errors import AnsibleConnectionFailure
+
+
+class TerminalBase(ABC):
+ '''
+ A base class for implementing cli connections
+
+ .. note:: Unlike most of Ansible, nearly all strings in
+ :class:`TerminalBase` plugins are byte strings. This is because of
+ how close to the underlying platform these plugins operate. Remember
+ to mark literal strings as byte string (``b"string"``) and to use
+ :func:`~ansible.module_utils._text.to_bytes` and
+ :func:`~ansible.module_utils._text.to_text` to avoid unexpected
+ problems.
+ '''
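+
+    # Illustrative byte-string discipline (comments only, nothing here executes):
+    #   self._exec_cli_command(b'terminal length 0')  # literal marked as bytes
+    #   text = to_text(self._get_prompt())            # decode before text handling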
+
+ #: compiled bytes regular expressions as stdout
+ terminal_stdout_re = [] # type: list[re.Pattern]
+
+ #: compiled bytes regular expressions as stderr
+ terminal_stderr_re = [] # type: list[re.Pattern]
+
+ #: compiled bytes regular expressions to remove ANSI codes
+ ansi_re = [
+ re.compile(br'\x1b\[\?1h\x1b='), # CSI ? 1 h ESC =
+ re.compile(br'\x08.'), # [Backspace] .
+ re.compile(br"\x1b\[m"), # ANSI reset code
+ ]
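+    # Illustrative effect, assuming the patterns above are applied to raw output:
+    #   b'\x1b[mrouter#'  ->  b'router#'   (ANSI reset code stripped)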
+
+ #: terminal initial prompt
+ terminal_initial_prompt = None
+
+ #: terminal initial answer
+ terminal_initial_answer = None
+
+ #: Send newline after prompt match
+ terminal_inital_prompt_newline = True
+
+ def __init__(self, connection):
+ self._connection = connection
+
+ def _exec_cli_command(self, cmd, check_rc=True):
+ '''
+ Executes the CLI command on the remote device and returns the output
+
+ :arg cmd: Byte string command to be executed
+ '''
+ return self._connection.exec_command(cmd)
+
+ def _get_prompt(self):
+ """
+ Returns the current prompt from the device
+
+ :returns: A byte string of the prompt
+ """
+ return self._connection.get_prompt()
+
+ def on_open_shell(self):
+ """Called after the SSH session is established
+
+ This method is called right after the invoke_shell() is called from
+        the Paramiko SSHClient instance. It provides an opportunity to set up
+        terminal parameters, such as disabling paging.
+ """
+ pass
+
+ def on_close_shell(self):
+ """Called before the connection is closed
+
+ This method gets called once the connection close has been requested,
+ but before the connection is actually closed. It provides an
+ opportunity to clean up any terminal resources before the shell is
+ torn down.
+ """
+ pass
+
+ def on_become(self, passwd=None):
+ """Called when privilege escalation is requested
+
+ :kwarg passwd: String containing the password
+
+ This method is called when privilege escalation is requested in the
+ play context by setting become to True. It is the responsibility of
+ the terminal plugin to actually perform the privilege escalation,
+ such as entering `enable` mode.
+ """
+ pass
+
+ def on_unbecome(self):
+ """Called when privilege deescalation is requested
+
+ This method is called when the privilege changed from escalated
+ (become=True) to non escalated (become=False). It is the responsibility
+ of this method to actually perform the deauthorization procedure
+ """
+ pass
+
+ def on_authorize(self, passwd=None):
+ """Deprecated method for privilege escalation
+
+ :kwarg passwd: String containing the password
+ """
+ return self.on_become(passwd)
+
+ def on_deauthorize(self):
+ """Deprecated method for privilege deescalation
+ """
+ return self.on_unbecome()
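The hook methods above are all no-ops or simple delegations, so the class only does something useful once subclassed. As a minimal sketch of a concrete terminal plugin, assuming a hypothetical network OS whose prompt ends in `>` or `#` and which accepts a `terminal length 0` paging command (the patterns and the command are illustrative, not taken from any real plugin):

    # hypothetical terminal plugin for an imaginary network OS
    import re

    from ansible.errors import AnsibleConnectionFailure
    from ansible.plugins.terminal import TerminalBase


    class TerminalModule(TerminalBase):

        # byte-string patterns, per the note on TerminalBase
        terminal_stdout_re = [
            re.compile(br'[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}[>#] ?$'),
        ]
        terminal_stderr_re = [
            re.compile(br'% ?Error'),
            re.compile(br'% ?Invalid input'),
        ]

        def on_open_shell(self):
            # disable paging so long command output is not truncated
            try:
                self._exec_cli_command(b'terminal length 0')
            except AnsibleConnectionFailure:
                raise AnsibleConnectionFailure('unable to set terminal parameters')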
diff --git a/lib/ansible/plugins/test/__init__.py b/lib/ansible/plugins/test/__init__.py
new file mode 100644
index 0000000..1400316
--- /dev/null
+++ b/lib/ansible/plugins/test/__init__.py
@@ -0,0 +1,13 @@
+# (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins import AnsibleJinja2Plugin
+
+
+class AnsibleJinja2Test(AnsibleJinja2Plugin):
+
+ def _no_options(self, *args, **kwargs):
+ raise NotImplementedError("Jinja2 test plugins do not support option functions, they use direct arguments instead.")
diff --git a/lib/ansible/plugins/test/abs.yml b/lib/ansible/plugins/test/abs.yml
new file mode 100644
index 0000000..46f7f70
--- /dev/null
+++ b/lib/ansible/plugins/test/abs.yml
@@ -0,0 +1,23 @@
+DOCUMENTATION:
+ name: abs
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: is the path absolute
+ aliases: [is_abs]
+ description:
+ - Check if the provided path is absolute, not relative.
+ - An absolute path expresses the location of a filesystem object starting at the filesystem root and requires no context.
+ - A relative path does not start at the filesystem root and requires a 'current' directory as a context to resolve.
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ is_path_absolute: "{{ '/etc/hosts' is abs }}"
+ relative_paths: "{{ all_paths | reject('abs') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path is absolute, C(False) if it is relative.
+ type: boolean
diff --git a/lib/ansible/plugins/test/all.yml b/lib/ansible/plugins/test/all.yml
new file mode 100644
index 0000000..e227d6e
--- /dev/null
+++ b/lib/ansible/plugins/test/all.yml
@@ -0,0 +1,23 @@
+DOCUMENTATION:
+ name: all
+ author: Ansible Core
+ version_added: "2.4"
+ short_description: are all conditions in a list true
+ description:
+ - This test checks each condition in a list for truthiness.
+ - Same as the C(all) Python function.
+ options:
+ _input:
+ description: List of conditions, each can be a boolean or conditional expression that results in a boolean value.
+ type: list
+ elements: raw
+ required: True
+EXAMPLES: |
+ varexpression: "{{ 3 == 3 }}"
+ # are all statements true?
+ {{ [true, booleanvar, varexpression] is all }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if all elements of the list were True, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/any.yml b/lib/ansible/plugins/test/any.yml
new file mode 100644
index 0000000..0ce9e48
--- /dev/null
+++ b/lib/ansible/plugins/test/any.yml
@@ -0,0 +1,23 @@
+DOCUMENTATION:
+ name: any
+ author: Ansible Core
+ version_added: "2.4"
+ short_description: is any condition in a list true
+ description:
+ - This test checks each condition in a list for truthiness.
+ - Same as the C(any) Python function.
+ options:
+ _input:
+ description: List of conditions, each can be a boolean or conditional expression that results in a boolean value.
+ type: list
+ elements: raw
+ required: True
+EXAMPLES: |
+ varexpression: "{{ 3 == 3 }}"
+ # is any statement true?
+ {{ [false, booleanvar, varexpression] is any }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if any element of the list was true, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/change.yml b/lib/ansible/plugins/test/change.yml
new file mode 100644
index 0000000..1fb1e5e
--- /dev/null
+++ b/lib/ansible/plugins/test/change.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: changed
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: did the task require changes
+ aliases: [change]
+ description:
+ - Tests if task required changes to complete.
+ - This test checks for the existence of a C(changed) key in the input dictionary and that it is C(True) if present.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ taskresults is changed }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task required changes, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/changed.yml b/lib/ansible/plugins/test/changed.yml
new file mode 100644
index 0000000..1fb1e5e
--- /dev/null
+++ b/lib/ansible/plugins/test/changed.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: changed
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: did the task require changes
+ aliases: [change]
+ description:
+ - Tests if task required changes to complete.
+ - This test checks for the existence of a C(changed) key in the input dictionary and that it is C(True) if present.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ taskresults is changed }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task required changes, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/contains.yml b/lib/ansible/plugins/test/contains.yml
new file mode 100644
index 0000000..68741da
--- /dev/null
+++ b/lib/ansible/plugins/test/contains.yml
@@ -0,0 +1,49 @@
+DOCUMENTATION:
+ name: contains
+ author: Ansible Core
+ version_added: "2.4"
+ short_description: does the list contain this element
+ description:
+ - Checks the supplied element against the input list to see if it exists within it.
+ options:
+ _input:
+ description: List of elements to compare.
+ type: list
+ elements: raw
+ required: True
+ _contained:
+ description: Element to test for.
+ type: raw
+ required: True
+EXAMPLES: |
+ # simple expression
+ {{ listofthings is contains('this') }}
+
+ # as a selector
+ - action: module=doessomething
+ when: (lacp_groups|selectattr('interfaces', 'contains', 'em1')|first).master
+ vars:
+ lacp_groups:
+ - master: lacp0
+ network: 10.65.100.0/24
+ gateway: 10.65.100.1
+ dns4:
+ - 10.65.100.10
+ - 10.65.100.11
+ interfaces:
+ - em1
+ - em2
+
+ - master: lacp1
+ network: 10.65.120.0/24
+ gateway: 10.65.120.1
+ dns4:
+ - 10.65.100.10
+ - 10.65.100.11
+ interfaces:
+ - em3
+ - em4
+RETURN:
+ _value:
+ description: Returns C(True) if the specified element is contained in the supplied sequence, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/core.py b/lib/ansible/plugins/test/core.py
new file mode 100644
index 0000000..d9e7e8b
--- /dev/null
+++ b/lib/ansible/plugins/test/core.py
@@ -0,0 +1,287 @@
+# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import operator as py_operator
+
+from collections.abc import MutableMapping, MutableSequence
+
+from ansible.module_utils.compat.version import LooseVersion, StrictVersion
+
+from ansible import errors
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.utils.display import Display
+from ansible.utils.version import SemanticVersion
+
+try:
+ from packaging.version import Version as PEP440Version
+ HAS_PACKAGING = True
+except ImportError:
+ HAS_PACKAGING = False
+
+display = Display()
+
+
+def failed(result):
+ ''' Test if task result yields failed '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'failed' test expects a dictionary")
+ return result.get('failed', False)
+
+
+def success(result):
+ ''' Test if task result yields success '''
+ return not failed(result)
+
+
+def unreachable(result):
+ ''' Test if task result yields unreachable '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'unreachable' test expects a dictionary")
+ return result.get('unreachable', False)
+
+
+def reachable(result):
+ ''' Test if task result yields reachable '''
+ return not unreachable(result)
+
+
+def changed(result):
+ ''' Test if task result yields changed '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'changed' test expects a dictionary")
+ if 'changed' not in result:
+ changed = False
+ if (
+ 'results' in result and # some modules return a 'results' key
+ isinstance(result['results'], MutableSequence) and
+ isinstance(result['results'][0], MutableMapping)
+ ):
+ for res in result['results']:
+ if res.get('changed', False):
+ changed = True
+ break
+ else:
+ changed = result.get('changed', False)
+ return changed
+
+
+def skipped(result):
+ ''' Test if task result yields skipped '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'skipped' test expects a dictionary")
+ return result.get('skipped', False)
+
+
+def started(result):
+ ''' Test if async task has started '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'started' test expects a dictionary")
+ if 'started' in result:
+ # For async tasks, return status
+ # NOTE: The value of started is 0 or 1, not False or True :-/
+ return result.get('started', 0) == 1
+ else:
+ # For non-async tasks, warn user, but return as if started
+ display.warning("The 'started' test expects an async task, but a non-async task was tested")
+ return True
+
+
+def finished(result):
+ ''' Test if async task has finished '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'finished' test expects a dictionary")
+ if 'finished' in result:
+ # For async tasks, return status
+ # NOTE: The value of finished is 0 or 1, not False or True :-/
+ return result.get('finished', 0) == 1
+ else:
+ # For non-async tasks, warn user, but return as if finished
+ display.warning("The 'finished' test expects an async task, but a non-async task was tested")
+ return True
+
+
+def regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'):
+ ''' Expose `re` as a boolean test using the `search` method by default.
+ This is likely only useful for `search` and `match`, which already
+ have their own tests.
+ '''
+ # In addition to ensuring the correct type, to_text here will ensure
+ # _fail_with_undefined_error happens if the value is Undefined
+ value = to_text(value, errors='surrogate_or_strict')
+ flags = 0
+ if ignorecase:
+ flags |= re.I
+ if multiline:
+ flags |= re.M
+ _re = re.compile(pattern, flags=flags)
+ # fall back to search if an unknown match_type is given, rather than
+ # trying to call the default string
+ return bool(getattr(_re, match_type, _re.search)(value))
+
+
+def vault_encrypted(value):
+ """Evaulate whether a variable is a single vault encrypted value
+
+ .. versionadded:: 2.10
+ """
+ return getattr(value, '__ENCRYPTED__', False) and value.is_encrypted()
+
+
+def match(value, pattern='', ignorecase=False, multiline=False):
+ ''' Perform a `re.match` returning a boolean '''
+ return regex(value, pattern, ignorecase, multiline, 'match')
+
+
+def search(value, pattern='', ignorecase=False, multiline=False):
+ ''' Perform a `re.search` returning a boolean '''
+ return regex(value, pattern, ignorecase, multiline, 'search')
+
+
+def version_compare(value, version, operator='eq', strict=None, version_type=None):
+ ''' Perform a version comparison on a value '''
+ op_map = {
+ '==': 'eq', '=': 'eq', 'eq': 'eq',
+ '<': 'lt', 'lt': 'lt',
+ '<=': 'le', 'le': 'le',
+ '>': 'gt', 'gt': 'gt',
+ '>=': 'ge', 'ge': 'ge',
+ '!=': 'ne', '<>': 'ne', 'ne': 'ne'
+ }
+
+ type_map = {
+ 'loose': LooseVersion,
+ 'strict': StrictVersion,
+ 'semver': SemanticVersion,
+ 'semantic': SemanticVersion,
+ 'pep440': PEP440Version,
+ }
+
+ if strict is not None and version_type is not None:
+ raise errors.AnsibleFilterError("Cannot specify both 'strict' and 'version_type'")
+
+ if not value:
+ raise errors.AnsibleFilterError("Input version value cannot be empty")
+
+ if not version:
+ raise errors.AnsibleFilterError("Version parameter to compare against cannot be empty")
+
+ if version_type == 'pep440' and not HAS_PACKAGING:
+ raise errors.AnsibleFilterError("The pep440 version_type requires the Python 'packaging' library")
+
+ Version = LooseVersion
+ if strict:
+ Version = StrictVersion
+ elif version_type:
+ try:
+ Version = type_map[version_type]
+ except KeyError:
+ raise errors.AnsibleFilterError(
+ "Invalid version type (%s). Must be one of %s" % (version_type, ', '.join(map(repr, type_map)))
+ )
+
+ if operator in op_map:
+ operator = op_map[operator]
+ else:
+ raise errors.AnsibleFilterError(
+ 'Invalid operator type (%s). Must be one of %s' % (operator, ', '.join(map(repr, op_map)))
+ )
+
+ try:
+ method = getattr(py_operator, operator)
+ return method(Version(to_text(value)), Version(to_text(version)))
+ except Exception as e:
+ raise errors.AnsibleFilterError('Version comparison failed: %s' % to_native(e))
+
+
+def truthy(value, convert_bool=False):
+ """Evaluate as value for truthiness using python ``bool``
+
+ Optionally, attempt to do a conversion to bool from boolean like values
+ such as ``"false"``, ``"true"``, ``"yes"``, ``"no"``, ``"on"``, ``"off"``, etc.
+
+ .. versionadded:: 2.10
+ """
+ if convert_bool:
+ try:
+ value = boolean(value)
+ except TypeError:
+ pass
+
+ return bool(value)
+
+
+def falsy(value, convert_bool=False):
+ """Evaluate as value for falsiness using python ``bool``
+
+ Optionally, attempt to do a conversion to bool from boolean like values
+ such as ``"false"``, ``"true"``, ``"yes"``, ``"no"``, ``"on"``, ``"off"``, etc.
+
+ .. versionadded:: 2.10
+ """
+ return not truthy(value, convert_bool=convert_bool)
+
+
+class TestModule(object):
+ ''' Ansible core jinja2 tests '''
+
+ def tests(self):
+ return {
+ # failure testing
+ 'failed': failed,
+ 'failure': failed,
+ 'succeeded': success,
+ 'success': success,
+ 'successful': success,
+ 'reachable': reachable,
+ 'unreachable': unreachable,
+
+ # changed testing
+ 'changed': changed,
+ 'change': changed,
+
+ # skip testing
+ 'skipped': skipped,
+ 'skip': skipped,
+
+ # async testing
+ 'finished': finished,
+ 'started': started,
+
+ # regex
+ 'match': match,
+ 'search': search,
+ 'regex': regex,
+
+ # version comparison
+ 'version_compare': version_compare,
+ 'version': version_compare,
+
+ # lists
+ 'any': any,
+ 'all': all,
+
+ # truthiness
+ 'truthy': truthy,
+ 'falsy': falsy,
+
+ # vault
+ 'vault_encrypted': vault_encrypted,
+ }
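Since these tests are plain functions, their semantics can be exercised directly, outside of Jinja2. A quick illustrative session against the module above (the result dictionaries are invented):

    from ansible.plugins.test.core import changed, success, version_compare

    # 'changed' falls back to scanning a per-item 'results' list when
    # the top-level 'changed' key is absent
    print(changed({'changed': False}))                        # False
    print(changed({'results': [{'changed': False},
                               {'changed': True}]}))          # True

    # 'success' is simply the negation of 'failed'
    print(success({'failed': False}))                         # True

    # 'version'/'version_compare' accept symbolic or named operators
    print(version_compare('1.0', '2.0', '<'))                 # True
    print(version_compare('2.14.0rc1', '2.14.0', 'lt',
                          version_type='pep440'))             # True, needs 'packaging'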
diff --git a/lib/ansible/plugins/test/directory.yml b/lib/ansible/plugins/test/directory.yml
new file mode 100644
index 0000000..5d7fa78
--- /dev/null
+++ b/lib/ansible/plugins/test/directory.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: directory
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: does the path resolve to an existing directory
+ description:
+ - Check if the provided path maps to an existing directory on the controller's filesystem (localhost).
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ vars:
+ my_etc_hosts_not_a_dir: "{{ '/etc/hosts' is directory }}"
+ list_of_files: "{{ list_of_paths | reject('directory') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path corresponds to an existing directory on the filesystem on the controller, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/exists.yml b/lib/ansible/plugins/test/exists.yml
new file mode 100644
index 0000000..85f9108
--- /dev/null
+++ b/lib/ansible/plugins/test/exists.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: exists
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: does the path exist, follow symlinks
+ description:
+ - Check if the provided path maps to an existing filesystem object on the controller (localhost).
+ - Follows symlinks and checks the target of the symlink instead of the link itself; use the C(link) or C(link_exists) tests to check on the link.
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ vars:
+ my_etc_hosts_exists: "{{ '/etc/hosts' is exists }}"
+ list_of_local_files_to_copy_to_remote: "{{ list_of_all_possible_files | select('exists') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path corresponds to an existing filesystem object on the controller (after following symlinks), C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/failed.yml b/lib/ansible/plugins/test/failed.yml
new file mode 100644
index 0000000..b8a9b3e
--- /dev/null
+++ b/lib/ansible/plugins/test/failed.yml
@@ -0,0 +1,23 @@
+DOCUMENTATION:
+ name: failed
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: did the task fail
+ aliases: [failure]
+ description:
+ - Tests if task finished in failure, opposite of C(succeeded).
+ - This test checks for the existence of a C(failed) key in the input dictionary and that it is C(True) if present.
+ - Tasks that get skipped or not executed due to other failures (syntax, templating, unreachable host, etc) do not return a 'failed' status.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ taskresults is failed }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task failed, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/failure.yml b/lib/ansible/plugins/test/failure.yml
new file mode 100644
index 0000000..b8a9b3e
--- /dev/null
+++ b/lib/ansible/plugins/test/failure.yml
@@ -0,0 +1,23 @@
+DOCUMENTATION:
+ name: failed
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: did the task fail
+ aliases: [failure]
+ description:
+ - Tests if task finished in failure, opposite of C(succeeded).
+ - This test checks for the existence of a C(failed) key in the input dictionary and that it is C(True) if present.
+ - Tasks that get skipped or not executed due to other failures (syntax, templating, unreachable host, etc) do not return a 'failed' status.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ taskresults is failed }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task failed, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/falsy.yml b/lib/ansible/plugins/test/falsy.yml
new file mode 100644
index 0000000..49a198f
--- /dev/null
+++ b/lib/ansible/plugins/test/falsy.yml
@@ -0,0 +1,24 @@
+DOCUMENTATION:
+ name: falsy
+ author: Ansible Core
+ version_added: "2.10"
+ short_description: Pythonic false
+ description:
+ - This test is a more Pythonic version of checking whether a value is 'false'.
+ - It is the opposite of C(truthy).
+ options:
+ _input:
+ description: An expression that can be expressed in a boolean context.
+ type: string
+ required: True
+ convert_bool:
+ description: Attempts to convert the input to a strict Python boolean before testing, accepting normally acceptable boolean-like values (C(yes)/C(no), C(on)/C(off), C(0)/C(1), etc).
+ type: bool
+ default: false
+EXAMPLES: |
+ thisisfalse: '{{ "any string" is falsy }}'
+ thisistrue: '{{ "" is falsy }}'
+RETURN:
+ _value:
+ description: Returns C(True) if the condition is not "Python truthy", C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/file.yml b/lib/ansible/plugins/test/file.yml
new file mode 100644
index 0000000..8b79c07
--- /dev/null
+++ b/lib/ansible/plugins/test/file.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: file
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: does the path resolve to an existing file
+ description:
+ - Check if the provided path maps to an existing file on the controller's filesystem (localhost).
+ aliases: [is_file]
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ vars:
+ my_etc_hosts_is_a_file: "{{ '/etc/hosts' is file }}"
+ list_of_files: "{{ list_of_paths | select('file') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path corresponds to an existing file on the filesystem on the controller, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/files.py b/lib/ansible/plugins/test/files.py
new file mode 100644
index 0000000..35761a4
--- /dev/null
+++ b/lib/ansible/plugins/test/files.py
@@ -0,0 +1,48 @@
+# (c) 2015, Ansible, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from os.path import isdir, isfile, isabs, exists, lexists, islink, samefile, ismount
+from ansible import errors
+
+
+class TestModule(object):
+ ''' Ansible file jinja2 tests '''
+
+ def tests(self):
+ return {
+ # file testing
+ 'directory': isdir,
+ 'is_dir': isdir,
+ 'file': isfile,
+ 'is_file': isfile,
+ 'link': islink,
+ 'is_link': islink,
+ 'exists': exists,
+ 'link_exists': lexists,
+
+ # path testing
+ 'abs': isabs,
+ 'is_abs': isabs,
+ 'same_file': samefile,
+ 'is_same_file': samefile,
+ 'mount': ismount,
+ 'is_mount': ismount,
+ }
diff --git a/lib/ansible/plugins/test/finished.yml b/lib/ansible/plugins/test/finished.yml
new file mode 100644
index 0000000..b01b132
--- /dev/null
+++ b/lib/ansible/plugins/test/finished.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: finished
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: Did async task finish
+ description:
+ - Used to test if an async task has finished; it will also work with normal tasks, but will issue a warning.
+ - This test checks for the existence of a C(finished) key in the input dictionary and that it is C(1) if present.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ asynctaskpoll is finished }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the async task has finished, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/is_abs.yml b/lib/ansible/plugins/test/is_abs.yml
new file mode 100644
index 0000000..46f7f70
--- /dev/null
+++ b/lib/ansible/plugins/test/is_abs.yml
@@ -0,0 +1,23 @@
+DOCUMENTATION:
+ name: abs
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: is the path absolute
+ aliases: [is_abs]
+ description:
+ - Check if the provided path is absolute, not relative.
+ - An absolute path expresses the location of a filesystem object starting at the filesystem root and requires no context.
+ - A relative path does not start at the filesystem root and requires a 'current' directory as a context to resolve.
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ is_path_absolute: "{{ '/etc/hosts' is abs }}"
+ relative_paths: "{{ all_paths | reject('abs') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path is absolute, C(False) if it is relative.
+ type: boolean
diff --git a/lib/ansible/plugins/test/is_dir.yml b/lib/ansible/plugins/test/is_dir.yml
new file mode 100644
index 0000000..5d7fa78
--- /dev/null
+++ b/lib/ansible/plugins/test/is_dir.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: directory
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: does the path resolve to an existing directory
+ description:
+ - Check if the provided path maps to an existing directory on the controller's filesystem (localhost).
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ vars:
+ my_etc_hosts_not_a_dir: "{{ '/etc/hosts' is directory }}"
+ list_of_files: "{{ list_of_paths | reject('directory') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path corresponds to an existing directory on the filesystem on the controller, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/is_file.yml b/lib/ansible/plugins/test/is_file.yml
new file mode 100644
index 0000000..8b79c07
--- /dev/null
+++ b/lib/ansible/plugins/test/is_file.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: file
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: does the path resolve to an existing file
+ description:
+ - Check if the provided path maps to an existing file on the controller's filesystem (localhost).
+ aliases: [is_file]
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ vars:
+ my_etc_hosts_is_a_file: "{{ '/etc/hosts' is file }}"
+ list_of_files: "{{ list_of_paths | select('file') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path corresponds to an existing file on the filesystem on the controller, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/is_link.yml b/lib/ansible/plugins/test/is_link.yml
new file mode 100644
index 0000000..27af41f
--- /dev/null
+++ b/lib/ansible/plugins/test/is_link.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: link
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: does the path reference an existing symbolic link
+ aliases: [is_link]
+ description:
+ - Check if the provided path maps to an existing symlink on the controller's filesystem (localhost).
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ ismyhostsalink: "{{ '/etc/hosts' is link }}"
+ list_of_symlinks: "{{ list_of_paths | select('link') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path corresponds to an existing symlink on the filesystem on the controller, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/is_mount.yml b/lib/ansible/plugins/test/is_mount.yml
new file mode 100644
index 0000000..23f19b6
--- /dev/null
+++ b/lib/ansible/plugins/test/is_mount.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: mount
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: does the path resolve to a mount point
+ description:
+ - Check if the provided path maps to a filesystem mount point on the controller (localhost).
+ aliases: [is_mount]
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ vars:
+ ihopefalse: "{{ '/etc/hosts' is mount }}"
+ normallytrue: "{{ '/tmp' is mount }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path corresponds to a mount point on the controller, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/is_same_file.yml b/lib/ansible/plugins/test/is_same_file.yml
new file mode 100644
index 0000000..a10a36a
--- /dev/null
+++ b/lib/ansible/plugins/test/is_same_file.yml
@@ -0,0 +1,24 @@
+DOCUMENTATION:
+ name: same_file
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: compares two paths to see if they resolve to the same filesystem object
+ description: Check if the provided paths map to the same location on the controller's filesystem (localhost).
+ aliases: [is_same_file]
+ options:
+ _input:
+ description: A path.
+ type: path
+ required: true
+ _path2:
+ description: Another path.
+ type: path
+ required: true
+
+EXAMPLES: |
+ amionelevelfromroot: "{{ '/etc/hosts' is same_file('../etc/hosts') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the paths correspond to the same location on the filesystem on the controller, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/isnan.yml b/lib/ansible/plugins/test/isnan.yml
new file mode 100644
index 0000000..3c1055b
--- /dev/null
+++ b/lib/ansible/plugins/test/isnan.yml
@@ -0,0 +1,20 @@
+DOCUMENTATION:
+ name: nan
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: is this not a number (NaN)
+ description:
+ - Whether the input is a special floating point number called L(not a number, https://en.wikipedia.org/wiki/NaN).
+ aliases: [isnan]
+ options:
+ _input:
+ description: Possible number representation or string that can be converted into one.
+ type: raw
+ required: true
+EXAMPLES: |
+ isnan: "{{ '42' is nan }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the input is NaN, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/issubset.yml b/lib/ansible/plugins/test/issubset.yml
new file mode 100644
index 0000000..d57d05b
--- /dev/null
+++ b/lib/ansible/plugins/test/issubset.yml
@@ -0,0 +1,28 @@
+DOCUMENTATION:
+ name: subset
+ author: Ansible Core
+ version_added: "2.4"
+ aliases: [issubset]
+ short_description: is the list a subset of this other list
+ description:
+ - Validate if the first list is a subset of (is included in) the second list.
+ - Same as the Python C(set.issubset) method.
+ options:
+ _input:
+ description: List.
+ type: list
+ elements: raw
+ required: True
+ _superset:
+ description: List to test against.
+ type: list
+ elements: raw
+ required: True
+EXAMPLES: |
+ big: [1,2,3,4,5]
+ small: [3,4]
+ issmallinbig: '{{ small is subset(big) }}'
+RETURN:
+ _value:
+ description: Returns C(True) if the specified list is a subset of the provided list, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/issuperset.yml b/lib/ansible/plugins/test/issuperset.yml
new file mode 100644
index 0000000..72be3d5
--- /dev/null
+++ b/lib/ansible/plugins/test/issuperset.yml
@@ -0,0 +1,28 @@
+DOCUMENTATION:
+ name: superset
+ author: Ansible Core
+ version_added: "2.4"
+ short_description: is the list a superset of this other list
+ aliases: [issuperset]
+ description:
+ - Validate if the first list is a superset of (includes) the second list.
+ - Same as the Python C(set.issuperset) method.
+ options:
+ _input:
+ description: List.
+ type: list
+ elements: raw
+ required: True
+ _subset:
+ description: List to test against.
+ type: list
+ elements: raw
+ required: True
+EXAMPLES: |
+ big: [1,2,3,4,5]
+ small: [3,4]
+ issmallinbig: '{{ big is superset(small) }}'
+RETURN:
+ _value:
+ description: Returns C(True) if the specified list is a superset of the provided list, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/link.yml b/lib/ansible/plugins/test/link.yml
new file mode 100644
index 0000000..27af41f
--- /dev/null
+++ b/lib/ansible/plugins/test/link.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: link
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: does the path reference an existing symbolic link
+ aliases: [is_link]
+ description:
+ - Check if the provided path maps to an existing symlink on the controller's filesystem (localhost).
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ ismyhostsalink: "{{ '/etc/hosts' is link }}"
+ list_of_symlinks: "{{ list_of_paths | select('link') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path corresponds to an existing symlink on the filesystem on the controller, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/link_exists.yml b/lib/ansible/plugins/test/link_exists.yml
new file mode 100644
index 0000000..f75a699
--- /dev/null
+++ b/lib/ansible/plugins/test/link_exists.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: link_exists
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: does the path exist, without following symlinks
+ description:
+ - Check if the provided path maps to an existing filesystem object on the controller's filesystem (localhost).
+ - Does not follow symlinks, so it only verifies that the link itself exists, even if its target does not.
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ ismyhostsalink: "{{ '/etc/hosts' is link_exists }}"
+ list_of_symlinks: "{{ list_of_paths | select('link_exists') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path corresponds to an existing filesystem object on the controller, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/match.yml b/lib/ansible/plugins/test/match.yml
new file mode 100644
index 0000000..ecb4ae6
--- /dev/null
+++ b/lib/ansible/plugins/test/match.yml
@@ -0,0 +1,32 @@
+DOCUMENTATION:
+ name: match
+ author: Ansible Core
+ short_description: Does string match regular expression from the start
+ description:
+ - Compare string against regular expression using Python's match function;
+ this means the regex is automatically anchored at the start of the string.
+ options:
+ _input:
+ description: String to match.
+ type: string
+ required: True
+ pattern:
+ description: Regex to match against.
+ type: string
+ required: True
+ ignorecase:
+ description: Use case insensitive matching.
+ type: boolean
+ default: False
+ multiline:
+ description: Match against multiple lines in string.
+ type: boolean
+ default: False
+EXAMPLES: |
+ url: "https://example.com/users/foo/resources/bar"
+ foundmatch: url is match("https://example.com/users/.*/resources")
+ nomatch: url is match("/users/.*/resources")
+RETURN:
+ _value:
+ description: Returns C(True) if there is a match, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/mathstuff.py b/lib/ansible/plugins/test/mathstuff.py
new file mode 100644
index 0000000..9a3f467
--- /dev/null
+++ b/lib/ansible/plugins/test/mathstuff.py
@@ -0,0 +1,62 @@
+# (c) 2016, Ansible, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import math
+
+
+def issubset(a, b):
+ return set(a) <= set(b)
+
+
+def issuperset(a, b):
+ return set(a) >= set(b)
+
+
+def isnotanumber(x):
+ try:
+ return math.isnan(x)
+ except TypeError:
+ return False
+
+
+def contains(seq, value):
+ '''Reversed-argument version of the ``in`` test, allowing use as a test in filters like ``selectattr``
+
+ .. versionadded:: 2.8
+ '''
+ return value in seq
+
+
+class TestModule:
+ ''' Ansible math jinja2 tests '''
+
+ def tests(self):
+ return {
+ # set theory
+ 'subset': issubset,
+ 'issubset': issubset,
+ 'superset': issuperset,
+ 'issuperset': issuperset,
+ 'contains': contains,
+
+ # numbers
+ 'nan': isnotanumber,
+ 'isnan': isnotanumber,
+ }
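Because these are thin wrappers over Python's set operations and math.isnan, the behavior is easy to confirm directly; a brief illustration (values invented):

    from ansible.plugins.test.mathstuff import contains, isnotanumber, issubset, issuperset

    print(issubset([3, 4], [1, 2, 3, 4, 5]))    # True: {3, 4} <= {1, 2, 3, 4, 5}
    print(issuperset([1, 2, 3], [2, 9]))        # False: 9 is missing from the first list
    print(contains(['em1', 'em2'], 'em1'))      # True: reversed-argument 'in'
    print(isnotanumber(float('nan')))           # True
    print(isnotanumber('42'))                   # False: the TypeError is swallowed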
diff --git a/lib/ansible/plugins/test/mount.yml b/lib/ansible/plugins/test/mount.yml
new file mode 100644
index 0000000..23f19b6
--- /dev/null
+++ b/lib/ansible/plugins/test/mount.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: mount
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: does the path resolve to a mount point
+ description:
+ - Check if the provided path maps to a filesystem mount point on the controller (localhost).
+ aliases: [is_mount]
+ options:
+ _input:
+ description: A path.
+ type: path
+
+EXAMPLES: |
+ vars:
+ ihopefalse: "{{ '/etc/hosts' is mount }}"
+ normallytrue: "{{ '/tmp' is mount }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the path corresponds to a mount point on the controller, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/nan.yml b/lib/ansible/plugins/test/nan.yml
new file mode 100644
index 0000000..3c1055b
--- /dev/null
+++ b/lib/ansible/plugins/test/nan.yml
@@ -0,0 +1,20 @@
+DOCUMENTATION:
+ name: nan
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: is this not a number (NaN)
+ description:
+ - Whether the input is a special floating point number called L(not a number, https://en.wikipedia.org/wiki/NaN).
+ aliases: [isnan]
+ options:
+ _input:
+ description: Possible number representation or string that can be converted into one.
+ type: raw
+ required: true
+EXAMPLES: |
+ isnan: "{{ '42' is nan }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the input is NaN, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/reachable.yml b/lib/ansible/plugins/test/reachable.yml
new file mode 100644
index 0000000..8cb1ce3
--- /dev/null
+++ b/lib/ansible/plugins/test/reachable.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: reachable
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: Task did not end due to unreachable host
+ description:
+ - Tests if the task was able to reach the host for execution.
+ - This test checks for the existence of a C(unreachable) key in the input dictionary and that it is C(False) if present.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ taskresults is reachable }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task did not flag the host as unreachable, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/regex.yml b/lib/ansible/plugins/test/regex.yml
new file mode 100644
index 0000000..90ca786
--- /dev/null
+++ b/lib/ansible/plugins/test/regex.yml
@@ -0,0 +1,37 @@
+DOCUMENTATION:
+ name: regex
+ author: Ansible Core
+ short_description: does the string match a regular expression
+ description:
+ - Compare string against regular expression using Python's match or search functions.
+ options:
+ _input:
+ description: String to match.
+ type: string
+ required: True
+ pattern:
+ description: Regex to match against.
+ type: string
+ required: True
+ ignorecase:
+ description: Use case insensitive matching.
+ type: boolean
+ default: False
+ multiline:
+ description: Match against multiple lines in string.
+ type: boolean
+ default: False
+ match_type:
+ description: Decides which Python regex function is used for the matching.
+ type: string
+ choices: [match, search]
+ default: search
+
+EXAMPLES: |
+ url: "https://example.com/users/foo/resources/bar"
+ foundmatch: url is regex("example\.com/\w+/foo")
+
+RETURN:
+ _value:
+ description: Returns C(True) if there is a match, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/same_file.yml b/lib/ansible/plugins/test/same_file.yml
new file mode 100644
index 0000000..a10a36a
--- /dev/null
+++ b/lib/ansible/plugins/test/same_file.yml
@@ -0,0 +1,24 @@
+DOCUMENTATION:
+ name: same_file
+ author: Ansible Core
+ version_added: "2.5"
+ short_description: compares two paths to see if they resolve to the same filesystem object
+ description: Check if the provided paths map to the same location on the controller's filesystem (localhost).
+ aliases: [is_same_file]
+ options:
+ _input:
+ description: A path.
+ type: path
+ required: true
+ _path2:
+ description: Another path.
+ type: path
+ required: true
+
+EXAMPLES: |
+ amionelevelfromroot: "{{ '/etc/hosts' is same_file('../etc/hosts') }}"
+
+RETURN:
+ _value:
+ description: Returns C(True) if the paths correspond to the same location on the filesystem on the controller, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/search.yml b/lib/ansible/plugins/test/search.yml
new file mode 100644
index 0000000..4578bde
--- /dev/null
+++ b/lib/ansible/plugins/test/search.yml
@@ -0,0 +1,33 @@
+DOCUMENTATION:
+ name: search
+ author: Ansible Core
+ short_description: Does string match a regular expression
+ description:
+ - Compare string against regular expression using Python's C(search) function.
+ options:
+ _input:
+ description: String to match.
+ type: string
+ required: True
+ pattern:
+ description: Regex to match against.
+ type: string
+ required: True
+ ignorecase:
+ description: Use case insensitive matching.
+ type: boolean
+ default: False
+ multiline:
+ description: Match against multiple lines in string.
+ type: boolean
+ default: False
+
+EXAMPLES: |
+ url: "https://example.com/users/foo/resources/bar"
+ foundmatch: url is search("https://example.com/users/.*/resources")
+ alsomatch: url is search("users/.*/resources")
+
+RETURN:
+ _value:
+ description: Returns C(True) if there is a match, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/skip.yml b/lib/ansible/plugins/test/skip.yml
new file mode 100644
index 0000000..9727172
--- /dev/null
+++ b/lib/ansible/plugins/test/skip.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: skipped
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: Was task skipped
+ aliases: [skip]
+ description:
+ - Tests if task was skipped.
+ - This test checks for the existence of a C(skipped) key in the input dictionary and that it is C(True) if present.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ taskresults is skipped }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task was skipped, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/skipped.yml b/lib/ansible/plugins/test/skipped.yml
new file mode 100644
index 0000000..9727172
--- /dev/null
+++ b/lib/ansible/plugins/test/skipped.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: skipped
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: Was task skipped
+ aliases: [skip]
+ description:
+ - Tests if task was skipped.
+ - This test checks for the existence of a C(skipped) key in the input dictionary and that it is C(True) if present.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ taskresults is skipped }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task was skipped, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/started.yml b/lib/ansible/plugins/test/started.yml
new file mode 100644
index 0000000..0cb0602
--- /dev/null
+++ b/lib/ansible/plugins/test/started.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: started
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: Was async task started
+ description:
+ - Used to check if an async task has started; it will also work with non-async tasks, but will issue a warning.
+ - This test checks for the existence of a C(started) key in the input dictionary and that it is C(1) if present.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ asynctaskpoll is started }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task has started, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/subset.yml b/lib/ansible/plugins/test/subset.yml
new file mode 100644
index 0000000..d57d05b
--- /dev/null
+++ b/lib/ansible/plugins/test/subset.yml
@@ -0,0 +1,28 @@
+DOCUMENTATION:
+ name: subset
+ author: Ansible Core
+ version_added: "2.4"
+ aliases: [issubset]
+ short_description: is the list a subset of this other list
+ description:
+ - Validate if the first list is a subset of (is included in) the second list.
+ - Same as the Python C(set.issubset) method.
+ options:
+ _input:
+ description: List.
+ type: list
+ elements: raw
+ required: True
+ _superset:
+ description: List to test against.
+ type: list
+ elements: raw
+ required: True
+EXAMPLES: |
+ big: [1,2,3,4,5]
+ small: [3,4]
+ issmallinbig: '{{ small is subset(big) }}'
+RETURN:
+ _value:
+ description: Returns C(True) if the specified list is a subset of the provided list, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/succeeded.yml b/lib/ansible/plugins/test/succeeded.yml
new file mode 100644
index 0000000..4626f9f
--- /dev/null
+++ b/lib/ansible/plugins/test/succeeded.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: success
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: check task success
+ aliases: [succeeded, successful]
+ description:
+ - Tests if task finished successfully, opposite of C(failed).
+ - This test checks for the existence of a C(failed) key in the input dictionary and that it is C(False) if present.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ taskresults is success }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task was successfully completed, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/success.yml b/lib/ansible/plugins/test/success.yml
new file mode 100644
index 0000000..4626f9f
--- /dev/null
+++ b/lib/ansible/plugins/test/success.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: success
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: check task success
+ aliases: [succeeded, successful]
+ description:
+ - Tests if task finished successfully, opposite of C(failed).
+ - This test checks for the existence of a C(failed) key in the input dictionary and that it is C(False) if present.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ taskresults is success }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task was successfully completed, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/successful.yml b/lib/ansible/plugins/test/successful.yml
new file mode 100644
index 0000000..4626f9f
--- /dev/null
+++ b/lib/ansible/plugins/test/successful.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: success
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: check task success
+ aliases: [succeeded, successful]
+ description:
+ - Tests if task finished successfully, opposite of C(failed).
+ - This test checks for the existence of a C(failed) key in the input dictionary and that it is C(False) if present.
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ taskresults is success }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task was successfully completed, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/superset.yml b/lib/ansible/plugins/test/superset.yml
new file mode 100644
index 0000000..72be3d5
--- /dev/null
+++ b/lib/ansible/plugins/test/superset.yml
@@ -0,0 +1,28 @@
+DOCUMENTATION:
+ name: superset
+ author: Ansible Core
+ version_added: "2.4"
+ short_description: is the list a superset of this other list
+ aliases: [issuperset]
+ description:
+ - Validate if the first list is a superset of (includes) the second list.
+ - Same as the Python C(set.issuperset) method.
+ options:
+ _input:
+ description: List.
+ type: list
+ elements: raw
+ required: True
+ _subset:
+ description: List to test against.
+ type: list
+ elements: raw
+ required: True
+EXAMPLES: |
+ big: [1,2,3,4,5]
+ small: [3,4]
+ issmallinbig: '{{ big is superset(small) }}'
+RETURN:
+ _value:
+ description: Returns C(True) if the specified list is a superset of the provided list, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/truthy.yml b/lib/ansible/plugins/test/truthy.yml
new file mode 100644
index 0000000..01d5255
--- /dev/null
+++ b/lib/ansible/plugins/test/truthy.yml
@@ -0,0 +1,24 @@
+DOCUMENTATION:
+ name: truthy
+ author: Ansible Core
+ version_added: "2.10"
+ short_description: Pythonic true
+ description:
+ - This test is a more Pythonic version of checking whether a value is 'true'.
+ - It is the opposite of C(falsy).
+ options:
+ _input:
+ description: An expression that can be expressed in a boolean context.
+ type: string
+ required: True
+ convert_bool:
+ description: Attempts to convert the input to a strict Python boolean before testing, accepting normally acceptable boolean-like values (C(yes)/C(no), C(on)/C(off), C(0)/C(1), etc).
+ type: bool
+ default: false
+EXAMPLES: |
+ thisistrue: '{{ "any string" is truthy }}'
+ thisisfalse: '{{ "" is truthy }}'
+RETURN:
+ _value:
+ description: Returns C(True) if the condition is "Python truthy", C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/unreachable.yml b/lib/ansible/plugins/test/unreachable.yml
new file mode 100644
index 0000000..ed6c17e
--- /dev/null
+++ b/lib/ansible/plugins/test/unreachable.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: unreachable
+ author: Ansible Core
+ version_added: "1.9"
+ short_description: Did task end due to the host being unreachable
+ description:
+ - Tests if the task was not able to reach the host for execution.
+ - This test checks for the existence of a C(unreachable) key in the input dictionary and that its value is C(True).
+ options:
+ _input:
+ description: registered result from an Ansible task
+ type: dictionary
+ required: True
+EXAMPLES: |
+ # test 'status' to know how to respond
+ {{ taskresults is unreachable }}
+
+RETURN:
+ _value:
+ description: Returns C(True) if the task flagged the host as unreachable, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/uri.py b/lib/ansible/plugins/test/uri.py
new file mode 100644
index 0000000..7ef3381
--- /dev/null
+++ b/lib/ansible/plugins/test/uri.py
@@ -0,0 +1,46 @@
+# (c) Ansible Project
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from urllib.parse import urlparse
+
+
+def is_uri(value, schemes=None):
+ ''' Will verify that the string passed is a valid 'URI', if given a list of valid schemes it will match those '''
+ try:
+ x = urlparse(value)
+ isit = all([x.scheme is not None, x.path is not None, not schemes or x.scheme in schemes])
+ except Exception:
+ isit = False
+ return isit
+
+
+def is_url(value, schemes=None):
+ ''' Will verify that the string passed is a valid 'URL' '''
+
+ isit = is_uri(value, schemes)
+ if isit:
+ try:
+ x = urlparse(value)
+ isit = bool(x.netloc or x.scheme == 'file')
+ except Exception:
+ isit = False
+ return isit
+
+
+def is_urn(value):
+ return is_uri(value, ['urn'])
+
+
+class TestModule(object):
+ ''' Ansible URI jinja2 test '''
+
+ def tests(self):
+ return {
+ # file testing
+ 'uri': is_uri,
+ 'url': is_url,
+ 'urn': is_urn,
+ }
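The three predicates build on urllib.parse.urlparse, so they can be checked from a plain Python session; a short illustration (inputs invented):

    from ansible.plugins.test.uri import is_uri, is_url, is_urn

    print(is_uri('http://example.com'))                   # True
    print(is_uri('http://example.com', ['ftp', 'file']))  # False: scheme not in list
    print(is_url('http://example.com'))                   # True: has a netloc
    print(is_url('urn:isbn:9780302376463'))               # False: no netloc, not file:
    print(is_urn('urn:isbn:9780302376463'))               # True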
diff --git a/lib/ansible/plugins/test/uri.yml b/lib/ansible/plugins/test/uri.yml
new file mode 100644
index 0000000..bb3b8bd
--- /dev/null
+++ b/lib/ansible/plugins/test/uri.yml
@@ -0,0 +1,30 @@
+DOCUMENTATION:
+ name: uri
+ author: Ansible Core
+ version_added: "2.14"
+ short_description: is the string a valid URI
+ description:
+ - Validates that the input string conforms to the URI standard, and optionally that its scheme is in the list of schemes provided.
+ options:
+ _input:
+ description: Possible URI.
+ type: string
+ required: True
+ schemes:
+ description: Subset of URI schemes to validate against, otherwise B(any) scheme is considered valid.
+ type: list
+ elements: string
+ required: False
+EXAMPLES: |
+ # URLs are URIs
+ {{ 'http://example.com' is uri }}
+ # but not all URIs are URLs
+ {{ 'mailto://nowone@example.com' is uri }}
+ # looking only for file transfer URIs
+ {{ 'mailto://nowone@example.com' is not uri(schemes=['ftp', 'ftps', 'sftp', 'file']) }}
+ # make sure the URL conforms to one of the 'special schemes'
+ {{ 'http://nobody:secret@example.com' is uri(['ftp', 'ftps', 'http', 'https', 'ws', 'wss']) }}
+RETURN:
+ _value:
+ description: Returns C(false) if the string is not a URI or its scheme does not match the supplied list, C(true) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/url.yml b/lib/ansible/plugins/test/url.yml
new file mode 100644
index 0000000..36b6c77
--- /dev/null
+++ b/lib/ansible/plugins/test/url.yml
@@ -0,0 +1,29 @@
+DOCUMENTATION:
+ name: url
+ author: Ansible Core
+ version_added: "2.14"
+ short_description: is the string a valid URL
+ description:
+ - Validates that a string conforms to the URL standard.
+ options:
+ _input:
+ description: Possible URL.
+ type: string
+ required: True
+ schemes:
+ description: Subset of URI schemes to validate against, otherwise B(any) scheme is considered valid.
+ type: list
+ elements: string
+EXAMPLES: |
+ # simple URL
+ {{ 'http://example.com' is url }}
+ # looking only for file transfer URIs
+ {{ 'mailto://nowone@example.com' is not uri(schemes=['ftp', 'ftps', 'sftp', 'file']) }}
+ # but it is a URI according to the standard
+ {{ 'mailto://nowone@example.com' is uri }}
+ # more complex URL
+ {{ 'ftp://admin:secret@example.com/path/to/myfile.yml' is url }}
+RETURN:
+ _value:
+ description: Returns C(false) if the string is not a URL, C(true) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/urn.yml b/lib/ansible/plugins/test/urn.yml
new file mode 100644
index 0000000..81a6686
--- /dev/null
+++ b/lib/ansible/plugins/test/urn.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION:
+ name: urn
+ author: Ansible Core
+ version_added: "2.14"
+ short_description: is the string a valid URN
+ description:
+ - Validates that the input string conforms to the URN standard.
+ options:
+ _input:
+ description: Possible URN.
+ type: string
+ required: True
+EXAMPLES: |
+ # ISBN in URN format
+ {{ 'urn:isbn:9780302376463' is urn }}
+ # this is URL/URI but not URN
+ {{ 'mailto://nowone@example.com' is not urn }}
+RETURN:
+ _value:
+ description: Returns C(true) if the string is a URN and C(false) if it is not.
+ type: boolean
diff --git a/lib/ansible/plugins/test/vault_encrypted.yml b/lib/ansible/plugins/test/vault_encrypted.yml
new file mode 100644
index 0000000..58d79f1
--- /dev/null
+++ b/lib/ansible/plugins/test/vault_encrypted.yml
@@ -0,0 +1,19 @@
+DOCUMENTATION:
+ name: vault_encrypted
+ author: Ansible Core
+ version_added: "2.10"
+ short_description: Is this an encrypted vault
+ description:
+ - Verifies if the input is an Ansible vault.
+ options:
+ _input:
+ description: The possible vault.
+ type: string
+ required: True
+EXAMPLES: |
+ thisisfalse: '{{ "any string" is vault_encrypted }}'
+ thisistrue: '{{ "$ANSIBLE_VAULT;1.2;AES256;dev...." is vault_encrypted }}'
+RETURN:
+ _value:
+ description: Returns C(True) if the input is a valid ansible vault, C(False) otherwise.
+ type: boolean
diff --git a/lib/ansible/plugins/test/version.yml b/lib/ansible/plugins/test/version.yml
new file mode 100644
index 0000000..92b6048
--- /dev/null
+++ b/lib/ansible/plugins/test/version.yml
@@ -0,0 +1,82 @@
+DOCUMENTATION:
+ name: version
+ author: Ansible Core
+ version_added: "1.6"
+ short_description: compare version strings
+ aliases: [version_compare]
+ description:
+ - Compare version strings using various versioning schemes
+ options:
+ _input:
+ description: Left hand version to compare
+ type: string
+ required: True
+ version:
+ description: Right hand version to compare
+ type: string
+ required: True
+ operator:
+ description: Comparison operator
+ type: string
+ required: False
+ choices:
+ - ==
+ - '='
+ - eq
+ - <
+ - lt
+ - <=
+ - le
+ - '>'
+ - gt
+ - '>='
+ - ge
+ - '!='
+ - <>
+ - ne
+ default: eq
+ strict:
+ description: Whether to use strict version scheme. Mutually exclusive with C(version_type)
+ type: boolean
+ required: False
+ default: False
+ version_type:
+ description: Version scheme to use for comparison. Mutually exclusive with C(strict). See C(notes) for descriptions of the version types.
+ type: string
+ required: False
+ choices:
+ - loose
+ - strict
+ - semver
+ - semantic
+ - pep440
+ default: loose
+ notes:
+ - C(loose) - This type corresponds to the Python C(distutils.version.LooseVersion) class. All version formats are valid for this type. The rules for comparison are simple and predictable, but may not always give expected results.
+ - C(strict) - This type corresponds to the Python C(distutils.version.StrictVersion) class. A version number consists of two or three dot-separated numeric components, with an optional "pre-release" tag on the end. The pre-release tag consists of a single letter C(a) or C(b) followed by a number. If the numeric components of two version numbers are equal, then one with a pre-release tag will always be deemed earlier (lesser) than one without.
+ - C(semver)/C(semantic) - This type implements the L(Semantic Version,https://semver.org) scheme for version comparison.
+ - C(pep440) - This type implements the Python L(PEP-440,https://peps.python.org/pep-0440/) versioning rules for version comparison. Added in version 2.14.
+EXAMPLES: |
+ - name: version test examples
+ assert:
+ that:
+ - "'1.0' is version_compare('1.0', '==')" # old name
+ - "'1.0' is version('1.0', '==')"
+ - "'1.0' is version('2.0', '!=')"
+ - "'1.0' is version('2.0', '<')"
+ - "'2.0' is version('1.0', '>')"
+ - "'1.0' is version('1.0', '<=')"
+ - "'1.0' is version('1.0', '>=')"
+ - "'1.0' is version_compare('1.0', '==', strict=true)" # old name
+ - "'1.0' is version('1.0', '==', strict=true)"
+ - "'1.0' is version('2.0', '!=', strict=true)"
+ - "'1.0' is version('2.0', '<', strict=true)"
+ - "'2.0' is version('1.0', '>', strict=true)"
+ - "'1.0' is version('1.0', '<=', strict=true)"
+ - "'1.0' is version('1.0', '>=', strict=true)"
+ - "'1.2.3' is version('2.0.0', 'lt', version_type='semver')"
+ - "'2.14.0rc1' is version('2.14.0', 'lt', version_type='pep440')"
+RETURN:
+ _value:
+ description: Returns C(True) or C(False) depending on the outcome of the comparison.
+ type: boolean
diff --git a/lib/ansible/plugins/test/version_compare.yml b/lib/ansible/plugins/test/version_compare.yml
new file mode 100644
index 0000000..92b6048
--- /dev/null
+++ b/lib/ansible/plugins/test/version_compare.yml
@@ -0,0 +1,82 @@
+DOCUMENTATION:
+ name: version
+ author: Ansible Core
+ version_added: "1.6"
+ short_description: compare version strings
+ aliases: [version_compare]
+ description:
+ - Compare version strings using various versioning schemes
+ options:
+ _input:
+ description: Left hand version to compare
+ type: string
+ required: True
+ version:
+ description: Right hand version to compare
+ type: string
+ required: True
+ operator:
+ description: Comparison operator
+ type: string
+ required: False
+ choices:
+ - ==
+ - '='
+ - eq
+ - <
+ - lt
+ - <=
+ - le
+ - '>'
+ - gt
+ - '>='
+ - ge
+ - '!='
+ - <>
+ - ne
+ default: eq
+ strict:
+ description: Whether to use strict version scheme. Mutually exclusive with C(version_type)
+ type: boolean
+ required: False
+ default: False
+ version_type:
+ description: Version scheme to use for comparison. Mutually exclusive with C(strict). See C(notes) for descriptions of the version types.
+ type: string
+ required: False
+ choices:
+ - loose
+ - strict
+ - semver
+ - semantic
+ - pep440
+ default: loose
+ notes:
+ - C(loose) - This type corresponds to the Python C(distutils.version.LooseVersion) class. All version formats are valid for this type. The rules for comparison are simple and predictable, but may not always give expected results.
+ - C(strict) - This type corresponds to the Python C(distutils.version.StrictVersion) class. A version number consists of two or three dot-separated numeric components, with an optional "pre-release" tag on the end. The pre-release tag consists of a single letter C(a) or C(b) followed by a number. If the numeric components of two version numbers are equal, then one with a pre-release tag will always be deemed earlier (lesser) than one without.
+ - C(semver)/C(semantic) - This type implements the L(Semantic Version,https://semver.org) scheme for version comparison.
+ - C(pep440) - This type implements the Python L(PEP-440,https://peps.python.org/pep-0440/) versioning rules for version comparison. Added in version 2.14.
+EXAMPLES: |
+ - name: version test examples
+ assert:
+ that:
+ - "'1.0' is version_compare('1.0', '==')" # old name
+ - "'1.0' is version('1.0', '==')"
+ - "'1.0' is version('2.0', '!=')"
+ - "'1.0' is version('2.0', '<')"
+ - "'2.0' is version('1.0', '>')"
+ - "'1.0' is version('1.0', '<=')"
+ - "'1.0' is version('1.0', '>=')"
+ - "'1.0' is version_compare('1.0', '==', strict=true)" # old name
+ - "'1.0' is version('1.0', '==', strict=true)"
+ - "'1.0' is version('2.0', '!=', strict=true)"
+ - "'1.0' is version('2.0', '<', strict=true)"
+ - "'2.0' is version('1.0', '>', strict=true)"
+ - "'1.0' is version('1.0', '<=', strict=true)"
+ - "'1.0' is version('1.0', '>=', strict=true)"
+ - "'1.2.3' is version('2.0.0', 'lt', version_type='semver')"
+ - "'2.14.0rc1' is version('2.14.0', 'lt', version_type='pep440')"
+RETURN:
+ _value:
+ description: Returns C(True) or C(False) depending on the outcome of the comparison.
+ type: boolean
diff --git a/lib/ansible/plugins/vars/__init__.py b/lib/ansible/plugins/vars/__init__.py
new file mode 100644
index 0000000..2a7bafd
--- /dev/null
+++ b/lib/ansible/plugins/vars/__init__.py
@@ -0,0 +1,41 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins import AnsiblePlugin
+from ansible.utils.path import basedir
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class BaseVarsPlugin(AnsiblePlugin):
+
+ """
+ Loads variables for groups and/or hosts
+ """
+
+ def __init__(self):
+ """ constructor """
+ super(BaseVarsPlugin, self).__init__()
+ self._display = display
+
+ def get_vars(self, loader, path, entities):
+ """ Gets variables. """
+ self._basedir = basedir(path)
diff --git a/lib/ansible/plugins/vars/host_group_vars.py b/lib/ansible/plugins/vars/host_group_vars.py
new file mode 100644
index 0000000..521b3b6
--- /dev/null
+++ b/lib/ansible/plugins/vars/host_group_vars.py
@@ -0,0 +1,116 @@
+# Copyright 2017 RedHat, inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: host_group_vars
+ version_added: "2.4"
+ short_description: Loads group_vars and host_vars
+ requirements:
+ - Enabled in configuration
+ description:
+ - Loads YAML vars into corresponding groups/hosts in group_vars/ and host_vars/ directories.
+ - Files are restricted by extension to one of .yaml, .json, .yml or no extension.
+ - Hidden (starting with '.') and backup (ending with '~') files and directories are ignored.
+ - Only applies to inventory sources that are existing paths.
+ - Starting in 2.10, this plugin requires being explicitly enabled, which it is by default.
+ options:
+ stage:
+ ini:
+ - key: stage
+ section: vars_host_group_vars
+ env:
+ - name: ANSIBLE_VARS_PLUGIN_STAGE
+ _valid_extensions:
+ default: [".yml", ".yaml", ".json"]
+ description:
+ - "Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these."
+ - 'This affects vars_files, include_vars, inventory and vars plugins among others.'
+ env:
+ - name: ANSIBLE_YAML_FILENAME_EXT
+ ini:
+ - key: yaml_valid_extensions
+ section: defaults
+ type: list
+ elements: string
+ extends_documentation_fragment:
+ - vars_plugin_staging
+'''
+
+import os
+from ansible import constants as C
+from ansible.errors import AnsibleParserError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.vars import BaseVarsPlugin
+from ansible.inventory.host import Host
+from ansible.inventory.group import Group
+from ansible.utils.vars import combine_vars
+
+FOUND = {} # type: dict[str, list[str]]
+
+
+class VarsModule(BaseVarsPlugin):
+
+ REQUIRES_ENABLED = True
+
+ def get_vars(self, loader, path, entities, cache=True):
+ ''' loads group_vars/host_vars for the given host or group entities '''
+
+ if not isinstance(entities, list):
+ entities = [entities]
+
+ super(VarsModule, self).get_vars(loader, path, entities)
+
+ data = {}
+ for entity in entities:
+ if isinstance(entity, Host):
+ subdir = 'host_vars'
+ elif isinstance(entity, Group):
+ subdir = 'group_vars'
+ else:
+ raise AnsibleParserError("Supplied entity must be Host or Group, got %s instead" % (type(entity)))
+
+ # avoid 'chroot' type inventory hostnames /path/to/chroot
+ if not entity.name.startswith(os.path.sep):
+ try:
+ found_files = []
+ # load vars
+ b_opath = os.path.realpath(to_bytes(os.path.join(self._basedir, subdir)))
+ opath = to_text(b_opath)
+ key = '%s.%s' % (entity.name, opath)
+ if cache and key in FOUND:
+ found_files = FOUND[key]
+ else:
+ # no need to do much if path does not exist for basedir
+ if os.path.exists(b_opath):
+ if os.path.isdir(b_opath):
+ self._display.debug("\tprocessing dir %s" % opath)
+ found_files = loader.find_vars_files(opath, entity.name)
+ FOUND[key] = found_files
+ else:
+ self._display.warning("Found %s that is not a directory, skipping: %s" % (subdir, opath))
+
+ for found in found_files:
+ new_data = loader.load_from_file(found, cache=True, unsafe=True)
+ if new_data: # ignore empty files
+ data = combine_vars(data, new_data)
+
+ except Exception as e:
+ raise AnsibleParserError(to_native(e))
+ return data
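+
+
+# Illustration (hypothetical inventory layout; names are examples only): given
+# an inventory source at 'inventory/hosts', the directory of that source is the
+# basedir and vars files are looked up relative to it:
+#
+#   inventory/
+#       hosts                     <- inventory source; its directory is the basedir
+#       group_vars/
+#           all.yml               <- merged into every host
+#           webservers.yml        <- merged into hosts of group 'webservers'
+#       host_vars/
+#           web1.example.com.yml  <- merged into host 'web1.example.com'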
diff --git a/lib/ansible/release.py b/lib/ansible/release.py
new file mode 100644
index 0000000..66a04b9
--- /dev/null
+++ b/lib/ansible/release.py
@@ -0,0 +1,24 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+__version__ = '2.14.3'
+__author__ = 'Ansible, Inc.'
+__codename__ = "C'mon Everybody"
diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py
new file mode 100644
index 0000000..1498d3f
--- /dev/null
+++ b/lib/ansible/template/__init__.py
@@ -0,0 +1,1027 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import datetime
+import os
+import pwd
+import re
+import time
+
+from collections.abc import Iterator, Sequence, Mapping, MappingView, MutableMapping
+from contextlib import contextmanager
+from numbers import Number
+from traceback import format_exc
+
+from jinja2.exceptions import TemplateSyntaxError, UndefinedError
+from jinja2.loaders import FileSystemLoader
+from jinja2.nativetypes import NativeEnvironment
+from jinja2.runtime import Context, StrictUndefined
+
+from ansible import constants as C
+from ansible.errors import (
+ AnsibleAssertionError,
+ AnsibleError,
+ AnsibleFilterError,
+ AnsibleLookupError,
+ AnsibleOptionsError,
+ AnsibleUndefinedVariable,
+)
+from ansible.module_utils.six import string_types, text_type
+from ansible.module_utils._text import to_native, to_text, to_bytes
+from ansible.module_utils.common.collections import is_sequence
+from ansible.plugins.loader import filter_loader, lookup_loader, test_loader
+from ansible.template.native_helpers import ansible_native_concat, ansible_eval_concat, ansible_concat
+from ansible.template.template import AnsibleJ2Template
+from ansible.template.vars import AnsibleJ2Vars
+from ansible.utils.display import Display
+from ansible.utils.listify import listify_lookup_plugin_terms
+from ansible.utils.native_jinja import NativeJinjaText
+from ansible.utils.unsafe_proxy import wrap_var
+
+display = Display()
+
+
+__all__ = ['Templar', 'generate_ansible_template_vars']
+
+# Primitive Types which we don't want Jinja to convert to strings.
+NON_TEMPLATED_TYPES = (bool, Number)
+
+JINJA2_OVERRIDE = '#jinja2:'
+
+JINJA2_BEGIN_TOKENS = frozenset(('variable_begin', 'block_begin', 'comment_begin', 'raw_begin'))
+JINJA2_END_TOKENS = frozenset(('variable_end', 'block_end', 'comment_end', 'raw_end'))
+
+RANGE_TYPE = type(range(0))
+
+
+def generate_ansible_template_vars(path, fullpath=None, dest_path=None):
+
+ if fullpath is None:
+ b_path = to_bytes(path)
+ else:
+ b_path = to_bytes(fullpath)
+
+ try:
+ template_uid = pwd.getpwuid(os.stat(b_path).st_uid).pw_name
+ except (KeyError, TypeError):
+ template_uid = os.stat(b_path).st_uid
+
+ temp_vars = {
+ 'template_host': to_text(os.uname()[1]),
+ 'template_path': path,
+ 'template_mtime': datetime.datetime.fromtimestamp(os.path.getmtime(b_path)),
+ 'template_uid': to_text(template_uid),
+ 'template_run_date': datetime.datetime.now(),
+ 'template_destpath': to_native(dest_path) if dest_path else None,
+ }
+
+ if fullpath is None:
+ temp_vars['template_fullpath'] = os.path.abspath(path)
+ else:
+ temp_vars['template_fullpath'] = fullpath
+
+ managed_default = C.DEFAULT_MANAGED_STR
+ managed_str = managed_default.format(
+ host=temp_vars['template_host'],
+ uid=temp_vars['template_uid'],
+ file=temp_vars['template_path'],
+ )
+ temp_vars['ansible_managed'] = to_text(time.strftime(to_native(managed_str), time.localtime(os.path.getmtime(b_path))))
+
+ return temp_vars
+
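+# Illustrative sketch (hypothetical call; 'motd.j2' must exist so the
+# os.stat/getmtime calls above succeed):
+#
+#   temp_vars = generate_ansible_template_vars('motd.j2', dest_path='/etc/motd')
+#   sorted(temp_vars)
+#   # ['ansible_managed', 'template_destpath', 'template_fullpath',
+#   #  'template_host', 'template_mtime', 'template_path',
+#   #  'template_run_date', 'template_uid']
+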
+
+def _escape_backslashes(data, jinja_env):
+ """Double backslashes within jinja2 expressions
+
+ A user may enter something like this in a playbook::
+
+ debug:
+ msg: "Test Case 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1')}}"
+
+ The string inside of the {{ }} gets interpreted multiple times: first by
+ yaml, then by python, and finally by jinja2 as part of its variable. Because
+ it is processed by both python and jinja2, the backslash escaped
+ characters get unescaped twice. This means that we'd normally have to use
+ four backslashes to escape that. This is painful for playbook authors as
+ they have to remember different rules for inside vs outside of a jinja2
+ expression (the backslashes outside of the "{{ }}" only get processed by
+ yaml and python, so they only need to be escaped once). The following
+ code fixes this by automatically performing the extra quoting of
+ backslashes inside of a jinja2 expression.
+
+ """
+ if '\\' in data and '{{' in data:
+ new_data = []
+ d2 = jinja_env.preprocess(data)
+ in_var = False
+
+ for token in jinja_env.lex(d2):
+ if token[1] == 'variable_begin':
+ in_var = True
+ new_data.append(token[2])
+ elif token[1] == 'variable_end':
+ in_var = False
+ new_data.append(token[2])
+ elif in_var and token[1] == 'string':
+ # Double backslashes only if we're inside of a jinja2 variable
+ new_data.append(token[2].replace('\\', '\\\\'))
+ else:
+ new_data.append(token[2])
+
+ data = ''.join(new_data)
+
+ return data
+
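+# For example (illustrative, assuming Jinja2's default '{{ }}' delimiters):
+#
+#   >>> from jinja2 import Environment
+#   >>> _escape_backslashes(r"foo\n {{ 'bar\n' }}", Environment())
+#   "foo\\n {{ 'bar\\\\n' }}"
+#
+# The backslash outside the delimiters is left alone, while the one inside
+# the Jinja2 string literal is doubled.
+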
+
+def is_possibly_template(data, jinja_env):
+ """Determines if a string looks like a template, by seeing if it
+ contains a jinja2 start delimiter. Does not guarantee that the string
+ is actually a template.
+
+ This is different than ``is_template`` which is more strict.
+ This method may return ``True`` on a string that is not templatable.
+
+ Useful when guarding passing a string for templating, but when
+ you want to allow the templating engine to make the final
+ assessment which may result in ``TemplateSyntaxError``.
+ """
+ if isinstance(data, string_types):
+ for marker in (jinja_env.block_start_string, jinja_env.variable_start_string, jinja_env.comment_start_string):
+ if marker in data:
+ return True
+ return False
+
+
+def is_template(data, jinja_env):
+ """This function attempts to quickly detect whether a value is a jinja2
+ template. To do so, we look for the first 2 matching jinja2 tokens for
+ start and end delimiters.
+ """
+ found = None
+ start = True
+ comment = False
+ d2 = jinja_env.preprocess(data)
+
+ # Quick check to see if this is remotely like a template before doing
+ # more expensive investigation.
+ if not is_possibly_template(d2, jinja_env):
+ return False
+
+ # This wraps a lot of code, but this is due to lex returning a generator
+ # so we may get an exception at any part of the loop
+ try:
+ for token in jinja_env.lex(d2):
+ if token[1] in JINJA2_BEGIN_TOKENS:
+ if start and token[1] == 'comment_begin':
+ # Comments can wrap other token types
+ comment = True
+ start = False
+ # Example: variable_begin -> variable
+ found = token[1].split('_')[0]
+ elif token[1] in JINJA2_END_TOKENS:
+ if token[1].split('_')[0] == found:
+ return True
+ elif comment:
+ continue
+ return False
+ except TemplateSyntaxError:
+ return False
+
+ return False
+
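+# Rough distinction between the two checks (illustrative; ``env`` is assumed
+# to be a jinja2 environment with default delimiters):
+#
+#   >>> is_possibly_template('{{ might be', env)   # start delimiter is enough
+#   True
+#   >>> is_template('{{ might be', env)            # no matching end token
+#   False
+#   >>> is_template('{{ foo }}', env)
+#   True
+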
+
+def _count_newlines_from_end(in_str):
+ '''
+ Counts the number of newlines at the end of a string. This is used during
+ the jinja2 templating to ensure the count matches the input, since some newlines
+ may be thrown away during the templating.
+ '''
+
+ try:
+ i = len(in_str)
+ j = i - 1
+ while in_str[j] == '\n':
+ j -= 1
+ return i - 1 - j
+ except IndexError:
+ # Uncommon cases: zero length string and string containing only newlines
+ return i
+
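+# For example (illustrative):
+#
+#   >>> _count_newlines_from_end('hello\n\n')
+#   2
+#   >>> _count_newlines_from_end('')
+#   0
+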
+
+def recursive_check_defined(item):
+ from jinja2.runtime import Undefined
+
+ if isinstance(item, MutableMapping):
+ for key in item:
+ recursive_check_defined(item[key])
+ elif isinstance(item, list):
+ for i in item:
+ recursive_check_defined(i)
+ else:
+ if isinstance(item, Undefined):
+ raise AnsibleFilterError("{0} is undefined".format(item))
+
+
+def _is_rolled(value):
+ """Helper method to determine if something is an unrolled generator,
+ iterator, or similar object
+ """
+ return (
+ isinstance(value, Iterator) or
+ isinstance(value, MappingView) or
+ isinstance(value, RANGE_TYPE)
+ )
+
+
+def _unroll_iterator(func):
+ """Wrapper function, that intercepts the result of a templating
+ and auto unrolls a generator, so that users are not required to
+ explicitly use ``|list`` to unroll.
+ """
+ def wrapper(*args, **kwargs):
+ ret = func(*args, **kwargs)
+ if _is_rolled(ret):
+ return list(ret)
+ return ret
+
+ return _update_wrapper(wrapper, func)
+
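+# For example (illustrative): a filter returning a generator is handed back
+# to the user as a list instead of a generator object:
+#
+#   >>> doubled = _unroll_iterator(lambda items: (i * 2 for i in items))
+#   >>> doubled([1, 2, 3])
+#   [2, 4, 6]
+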
+
+def _update_wrapper(wrapper, func):
+ # This code is duplicated from ``functools.update_wrapper`` from Py3.7.
+ # ``functools.update_wrapper`` was failing when the func was ``functools.partial``
+ for attr in ('__module__', '__name__', '__qualname__', '__doc__', '__annotations__'):
+ try:
+ value = getattr(func, attr)
+ except AttributeError:
+ pass
+ else:
+ setattr(wrapper, attr, value)
+ for attr in ('__dict__',):
+ getattr(wrapper, attr).update(getattr(func, attr, {}))
+ wrapper.__wrapped__ = func
+ return wrapper
+
+
+def _wrap_native_text(func):
+ """Wrapper function, that intercepts the result of a filter
+ and wraps it into NativeJinjaText which is then used
+ in ``ansible_native_concat`` to indicate that it is a text
+ which should not be passed into ``literal_eval``.
+ """
+ def wrapper(*args, **kwargs):
+ ret = func(*args, **kwargs)
+ return NativeJinjaText(ret)
+
+ return _update_wrapper(wrapper, func)
+
+
+class AnsibleUndefined(StrictUndefined):
+ '''
+ A custom Undefined class, which returns further Undefined objects on access,
+ rather than throwing an exception.
+ '''
+ def __getattr__(self, name):
+ if name == '__UNSAFE__':
+ # AnsibleUndefined should never be assumed to be unsafe
+ # This prevents ``hasattr(val, '__UNSAFE__')`` from evaluating to ``True``
+ raise AttributeError(name)
+ # Return original Undefined object to preserve the first failure context
+ return self
+
+ def __getitem__(self, key):
+ # Return original Undefined object to preserve the first failure context
+ return self
+
+ def __repr__(self):
+ return 'AnsibleUndefined(hint={0!r}, obj={1!r}, name={2!r})'.format(
+ self._undefined_hint,
+ self._undefined_obj,
+ self._undefined_name
+ )
+
+ def __contains__(self, item):
+ # Return original Undefined object to preserve the first failure context
+ return self
+
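+# Chained access therefore stays lazily undefined instead of raising right
+# away (illustrative):
+#
+#   >>> u = AnsibleUndefined(name='missing_var')
+#   >>> u.foo['bar'] is u        # attribute/item access returns the object itself
+#   True
+#   >>> str(u)                   # only concrete use raises
+#   Traceback (most recent call last):
+#       ...
+#   jinja2.exceptions.UndefinedError: 'missing_var' is undefined
+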
+
+class AnsibleContext(Context):
+ '''
+ A custom context, which intercepts resolve_or_missing() calls and sets a flag
+ internally if any variable lookup returns an AnsibleUnsafe value. This
+ flag is checked post-templating, and (when set) will result in the
+ final templated result being wrapped in AnsibleUnsafe.
+ '''
+ def __init__(self, *args, **kwargs):
+ super(AnsibleContext, self).__init__(*args, **kwargs)
+ self.unsafe = False
+
+ def _is_unsafe(self, val):
+ '''
+ Our helper function, which will also recursively check dict and
+ list entries due to the fact that they may be repr'd and contain
+ a key or value which contains jinja2 syntax and would otherwise
+ lose the AnsibleUnsafe value.
+ '''
+ if isinstance(val, dict):
+ for key in val.keys():
+ if self._is_unsafe(val[key]):
+ return True
+ elif isinstance(val, list):
+ for item in val:
+ if self._is_unsafe(item):
+ return True
+ elif getattr(val, '__UNSAFE__', False) is True:
+ return True
+ return False
+
+ def _update_unsafe(self, val):
+ if val is not None and not self.unsafe and self._is_unsafe(val):
+ self.unsafe = True
+
+ def resolve_or_missing(self, key):
+ val = super(AnsibleContext, self).resolve_or_missing(key)
+ self._update_unsafe(val)
+ return val
+
+ def get_all(self):
+ """Return the complete context as a dict including the exported
+ variables. For optimizations reasons this might not return an
+ actual copy so be careful with using it.
+
+ This is to prevent from running ``AnsibleJ2Vars`` through dict():
+
+ ``dict(self.parent, **self.vars)``
+
+ In Ansible this means that ALL variables would be templated in the
+ process of re-creating the parent because ``AnsibleJ2Vars`` templates
+ each variable in its ``__getitem__`` method. Instead we re-create the
+ parent via ``AnsibleJ2Vars.add_locals`` that creates a new
+ ``AnsibleJ2Vars`` copy without templating each variable.
+
+ This will prevent unnecessarily templating unused variables in cases
+ like setting a local variable and passing it to {% include %}
+ in a template.
+
+ Also see ``AnsibleJ2Template`` and
+ https://github.com/pallets/jinja/commit/d67f0fd4cc2a4af08f51f4466150d49da7798729
+ """
+ if not self.vars:
+ return self.parent
+ if not self.parent:
+ return self.vars
+
+ if isinstance(self.parent, AnsibleJ2Vars):
+ return self.parent.add_locals(self.vars)
+ else:
+ # can this happen in Ansible?
+ return dict(self.parent, **self.vars)
+
+
+class JinjaPluginIntercept(MutableMapping):
+ ''' Simulated dict class that loads Jinja2Plugins at request
+ otherwise all plugins would need to be loaded a priori.
+
+ NOTE: plugin_loader still loads all 'builtin/legacy' at
+ start so only collection plugins are really at request.
+ '''
+
+ def __init__(self, delegatee, pluginloader, *args, **kwargs):
+
+ super(JinjaPluginIntercept, self).__init__(*args, **kwargs)
+
+ self._pluginloader = pluginloader
+
+ # cache of resolved plugins
+ self._delegatee = delegatee
+
+ # track plugins we have loaded here; the cache above includes jinja2's own filters, which ours should override
+ self._loaded_builtins = set()
+
+ def __getitem__(self, key):
+
+ if not isinstance(key, string_types):
+ raise ValueError('key must be a string, got %s instead' % type(key))
+
+ original_exc = None
+ if key not in self._loaded_builtins:
+ plugin = None
+ try:
+ plugin = self._pluginloader.get(key)
+ except (AnsibleError, KeyError) as e:
+ original_exc = e
+ except Exception as e:
+ display.vvvv('Unexpected plugin load (%s) exception: %s' % (key, to_native(e)))
+ raise e
+
+ # if a plugin was found/loaded
+ if plugin:
+ # set in filter cache and avoid expensive plugin load
+ self._delegatee[key] = plugin.j2_function
+ self._loaded_builtins.add(key)
+
+ # raise template syntax error if we could not find ours or a jinja2 one
+ try:
+ func = self._delegatee[key]
+ except KeyError as e:
+ raise TemplateSyntaxError('Could not load "%s": %s' % (key, to_native(original_exc or e)), 0)
+
+ # if we do have a func and it is a filter, it needs wrapping
+ if self._pluginloader.type == 'filter':
+ # filters need wrapping
+ if key in C.STRING_TYPE_FILTERS:
+ # avoid literal_eval when you WANT strings
+ func = _wrap_native_text(func)
+ else:
+ # conditionally unroll iterators/generators to avoid having to use `|list` after every filter
+ func = _unroll_iterator(func)
+
+ return func
+
+ def __setitem__(self, key, value):
+ return self._delegatee.__setitem__(key, value)
+
+ def __delitem__(self, key):
+ raise NotImplementedError()
+
+ def __iter__(self):
+ # not strictly accurate since we're not counting dynamically-loaded values
+ return iter(self._delegatee)
+
+ def __len__(self):
+ # not strictly accurate since we're not counting dynamically-loaded values
+ return len(self._delegatee)
+
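+# Illustrative use (hypothetical; mirrors how AnsibleEnvironment wires this
+# up below):
+#
+#   filters = JinjaPluginIntercept({}, filter_loader)
+#   func = filters['ansible.builtin.b64decode']   # resolved via the loader on
+#                                                 # first use, then cached in
+#                                                 # the delegatee dict
+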
+
+def _fail_on_undefined(data):
+ """Recursively find an undefined value in a nested data structure
+ and properly raise the undefined exception.
+ """
+ if isinstance(data, Mapping):
+ for value in data.values():
+ _fail_on_undefined(value)
+ elif is_sequence(data):
+ for item in data:
+ _fail_on_undefined(item)
+ else:
+ if isinstance(data, StrictUndefined):
+ # To actually raise the undefined exception we need to
+ # access the undefined object otherwise the exception would
+ # be raised on the next access which might not be properly
+ # handled.
+ # See https://github.com/ansible/ansible/issues/52158
+ # and StrictUndefined implementation in upstream Jinja2.
+ str(data)
+ return data
+
+
+@_unroll_iterator
+def _ansible_finalize(thing):
+ """A custom finalize function for jinja2, which prevents None from being
+ returned. This avoids a literal string ``"None"`` appearing in output, as
+ ``None`` has no meaningful representation in YAML.
+
+ The function is decorated with ``_unroll_iterator`` so that users are not
+ required to explicitly use ``|list`` to unroll a generator. This only
+ affects the scenario where the final result of templating
+ is a generator, e.g. ``range``, ``dict.items()`` and so on. Filters
+ which can produce a generator in the middle of a template are already
+ wrapped with ``_unroll_iterator`` in ``JinjaPluginIntercept``.
+ """
+ return thing if _fail_on_undefined(thing) is not None else ''
+
+
+class AnsibleEnvironment(NativeEnvironment):
+ '''
+ Our custom environment, which simply allows us to override the class-level
+ values for the Template and Context classes used by jinja2 internally.
+ '''
+ context_class = AnsibleContext
+ template_class = AnsibleJ2Template
+ concat = staticmethod(ansible_eval_concat)
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ self.filters = JinjaPluginIntercept(self.filters, filter_loader)
+ self.tests = JinjaPluginIntercept(self.tests, test_loader)
+
+ self.trim_blocks = True
+
+ self.undefined = AnsibleUndefined
+ self.finalize = _ansible_finalize
+
+
+class AnsibleNativeEnvironment(AnsibleEnvironment):
+ concat = staticmethod(ansible_native_concat)
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.finalize = _unroll_iterator(_fail_on_undefined)
+
+
+class Templar:
+ '''
+ The main class for templating, with the main entry-point of template().
+ '''
+
+ def __init__(self, loader, shared_loader_obj=None, variables=None):
+ if shared_loader_obj is not None:
+ display.deprecated(
+ "The `shared_loader_obj` option to `Templar` is no longer functional, "
+ "ansible.plugins.loader is used directly instead.",
+ version='2.16',
+ )
+
+ self._loader = loader
+ self._available_variables = {} if variables is None else variables
+
+ self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
+
+ environment_class = AnsibleNativeEnvironment if C.DEFAULT_JINJA2_NATIVE else AnsibleEnvironment
+
+ self.environment = environment_class(
+ extensions=self._get_extensions(),
+ loader=FileSystemLoader(loader.get_basedir() if loader else '.'),
+ )
+ self.environment.template_class.environment_class = environment_class
+
+ # jinja2 global is inconsistent across versions, this normalizes them
+ self.environment.globals['dict'] = dict
+
+ # Custom globals
+ self.environment.globals['lookup'] = self._lookup
+ self.environment.globals['query'] = self.environment.globals['q'] = self._query_lookup
+ self.environment.globals['now'] = self._now_datetime
+ self.environment.globals['undef'] = self._make_undefined
+
+ # the current rendering context under which the templar class is working
+ self.cur_context = None
+
+ # FIXME this regex should be re-compiled each time variable_start_string and variable_end_string are changed
+ self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))
+
+ self.jinja2_native = C.DEFAULT_JINJA2_NATIVE
+
+ def copy_with_new_env(self, environment_class=AnsibleEnvironment, **kwargs):
+ r"""Creates a new copy of Templar with a new environment.
+
+ :kwarg environment_class: Environment class used for creating a new environment.
+ :kwarg \*\*kwargs: Optional arguments for the new environment that override existing
+ environment attributes.
+
+ :returns: Copy of Templar with updated environment.
+ """
+ # We need to use __new__ to skip __init__, mainly so we don't create a new
+ # environment there only to override it below
+ new_env = object.__new__(environment_class)
+ new_env.__dict__.update(self.environment.__dict__)
+
+ new_templar = object.__new__(Templar)
+ new_templar.__dict__.update(self.__dict__)
+ new_templar.environment = new_env
+
+ new_templar.jinja2_native = environment_class is AnsibleNativeEnvironment
+
+ mapping = {
+ 'available_variables': new_templar,
+ 'searchpath': new_env.loader,
+ }
+
+ for key, value in kwargs.items():
+ obj = mapping.get(key, new_env)
+ try:
+ if value is not None:
+ setattr(obj, key, value)
+ except AttributeError:
+ # Ignore invalid attrs
+ pass
+
+ return new_templar
+
+ def _get_extensions(self):
+ '''
+ Return jinja2 extensions to load.
+
+ If some extensions are set via jinja_extensions in ansible.cfg, we try
+ to load them with the jinja environment.
+ '''
+
+ jinja_exts = []
+ if C.DEFAULT_JINJA2_EXTENSIONS:
+ # make sure the configuration directive doesn't contain spaces
+ # and split extensions in an array
+ jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
+
+ return jinja_exts
+
+ @property
+ def available_variables(self):
+ return self._available_variables
+
+ @available_variables.setter
+ def available_variables(self, variables):
+ '''
+ Sets the mapping of template variables this Templar instance will use
+ to template things, so we don't have to pass them around between
+ internal methods.
+ '''
+
+ if not isinstance(variables, Mapping):
+ raise AnsibleAssertionError("the type of 'variables' should be a Mapping but was a %s" % (type(variables)))
+ self._available_variables = variables
+
+ @contextmanager
+ def set_temporary_context(self, **kwargs):
+ """Context manager used to set temporary templating context, without having to worry about resetting
+ original values afterward
+
+ Use a keyword that maps to the attr you are setting. Applies to ``self.environment`` by default, to
+ set context on another object, it must be in ``mapping``.
+ """
+ mapping = {
+ 'available_variables': self,
+ 'searchpath': self.environment.loader,
+ }
+ original = {}
+
+ for key, value in kwargs.items():
+ obj = mapping.get(key, self.environment)
+ try:
+ original[key] = getattr(obj, key)
+ if value is not None:
+ setattr(obj, key, value)
+ except AttributeError:
+ # Ignore invalid attrs
+ pass
+
+ yield
+
+ for key in original:
+ obj = mapping.get(key, self.environment)
+ setattr(obj, key, original[key])
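+
+ # Usage sketch (illustrative; ``templar`` is a hypothetical Templar
+ # instance and '/tmp/templates' a hypothetical path):
+ #
+ #   with templar.set_temporary_context(searchpath=['/tmp/templates']):
+ #       templar.template("{% include 'snippet.j2' %}")
+ #   # the previous environment.loader searchpath is restored on exit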
+
+ def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None,
+ convert_data=True, static_vars=None, cache=None, disable_lookups=False):
+ '''
+ Templates (possibly recursively) any given data as input. If convert_bare is
+ set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
+ before being sent through the template engine.
+ '''
+ static_vars = [] if static_vars is None else static_vars
+
+ if cache is not None:
+ display.deprecated("The `cache` option to `Templar.template` is no longer functional, and will be removed in a future release.", version='2.18')
+
+ # Don't template unsafe variables, just return them.
+ if hasattr(variable, '__UNSAFE__'):
+ return variable
+
+ if fail_on_undefined is None:
+ fail_on_undefined = self._fail_on_undefined_errors
+
+ if convert_bare:
+ variable = self._convert_bare_variable(variable)
+
+ if isinstance(variable, string_types):
+ if not self.is_possibly_template(variable):
+ return variable
+
+ # Check to see if the string we are trying to render is just referencing a single
+ # var. In this case we don't want to accidentally change the type of the variable
+ # to a string by using the jinja template renderer. We just want to pass it through.
+ only_one = self.SINGLE_VAR.match(variable)
+ if only_one:
+ var_name = only_one.group(1)
+ if var_name in self._available_variables:
+ resolved_val = self._available_variables[var_name]
+ if isinstance(resolved_val, NON_TEMPLATED_TYPES):
+ return resolved_val
+ elif resolved_val is None:
+ return C.DEFAULT_NULL_REPRESENTATION
+
+ result = self.do_template(
+ variable,
+ preserve_trailing_newlines=preserve_trailing_newlines,
+ escape_backslashes=escape_backslashes,
+ fail_on_undefined=fail_on_undefined,
+ overrides=overrides,
+ disable_lookups=disable_lookups,
+ convert_data=convert_data,
+ )
+
+ return result
+
+ elif is_sequence(variable):
+ return [self.template(
+ v,
+ preserve_trailing_newlines=preserve_trailing_newlines,
+ fail_on_undefined=fail_on_undefined,
+ overrides=overrides,
+ disable_lookups=disable_lookups,
+ ) for v in variable]
+ elif isinstance(variable, Mapping):
+ d = {}
+ # we don't use iteritems() here to avoid problems if the underlying dict
+ # changes sizes due to the templating, which can happen with hostvars
+ for k in variable.keys():
+ if k not in static_vars:
+ d[k] = self.template(
+ variable[k],
+ preserve_trailing_newlines=preserve_trailing_newlines,
+ fail_on_undefined=fail_on_undefined,
+ overrides=overrides,
+ disable_lookups=disable_lookups,
+ )
+ else:
+ d[k] = variable[k]
+ return d
+ else:
+ return variable
+
+ def is_template(self, data):
+ '''lets us know if data has a template'''
+ if isinstance(data, string_types):
+ return is_template(data, self.environment)
+ elif isinstance(data, (list, tuple)):
+ for v in data:
+ if self.is_template(v):
+ return True
+ elif isinstance(data, dict):
+ for k in data:
+ if self.is_template(k) or self.is_template(data[k]):
+ return True
+ return False
+
+ templatable = is_template
+
+ def is_possibly_template(self, data):
+ return is_possibly_template(data, self.environment)
+
+ def _convert_bare_variable(self, variable):
+ '''
+ Wraps a bare string, which may have an attribute portion (i.e. foo.bar)
+ in jinja2 variable braces so that it is evaluated properly.
+ '''
+
+ if isinstance(variable, string_types):
+ contains_filters = "|" in variable
+ first_part = variable.split("|")[0].split(".")[0].split("[")[0]
+ if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
+ return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string)
+
+ # the variable didn't meet the conditions to be converted,
+ # so just return it as-is
+ return variable
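+
+ # For example (illustrative): a bare 'foo.bar | upper' becomes
+ # '{{foo.bar | upper}}', while a string that already contains the start
+ # delimiter is returned unchanged:
+ #
+ #   >>> templar._convert_bare_variable('foo.bar | upper')
+ #   '{{foo.bar | upper}}'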
+
+ def _fail_lookup(self, name, *args, **kwargs):
+ raise AnsibleError("The lookup `%s` was found, however lookups were disabled from templating" % name)
+
+ def _now_datetime(self, utc=False, fmt=None):
+ '''jinja2 global function to return current datetime, potentially formatted via strftime'''
+ if utc:
+ now = datetime.datetime.utcnow()
+ else:
+ now = datetime.datetime.now()
+
+ if fmt:
+ return now.strftime(fmt)
+
+ return now
+
+ def _query_lookup(self, name, *args, **kwargs):
+ ''' wrapper for lookup, force wantlist true'''
+ kwargs['wantlist'] = True
+ return self._lookup(name, *args, **kwargs)
+
+ def _lookup(self, name, *args, **kwargs):
+ instance = lookup_loader.get(name, loader=self._loader, templar=self)
+
+ if instance is None:
+ raise AnsibleError("lookup plugin (%s) not found" % name)
+
+ wantlist = kwargs.pop('wantlist', False)
+ allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
+ errors = kwargs.pop('errors', 'strict')
+
+ loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, fail_on_undefined=True, convert_bare=False)
+ # safely catch run failures per #5059
+ try:
+ ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
+ except (AnsibleUndefinedVariable, UndefinedError) as e:
+ raise AnsibleUndefinedVariable(e)
+ except AnsibleOptionsError as e:
+ # invalid options given to lookup, just reraise
+ raise e
+ except AnsibleLookupError as e:
+ # lookup handled error but still decided to bail
+ msg = 'Lookup failed but the error is being ignored: %s' % to_native(e)
+ if errors == 'warn':
+ display.warning(msg)
+ elif errors == 'ignore':
+ display.display(msg, log_only=True)
+ else:
+ raise e
+ return [] if wantlist else None
+ except Exception as e:
+ # errors not handled by lookup
+ msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
+ (name, type(e), to_text(e))
+ if errors == 'warn':
+ display.warning(msg)
+ elif errors == 'ignore':
+ display.display(msg, log_only=True)
+ else:
+ display.vvv('exception during Jinja2 execution: {0}'.format(format_exc()))
+ raise AnsibleError(to_native(msg), orig_exc=e)
+ return [] if wantlist else None
+
+ if not is_sequence(ran):
+ display.deprecated(
+ f'The lookup plugin \'{name}\' was expected to return a list, got \'{type(ran)}\' instead. '
+ f'The lookup plugin \'{name}\' needs to be changed to return a list. '
+ 'This will be an error in Ansible 2.18',
+ version='2.18'
+ )
+
+ if ran and allow_unsafe is False:
+ if self.cur_context:
+ self.cur_context.unsafe = True
+
+ if wantlist:
+ return wrap_var(ran)
+
+ try:
+ if isinstance(ran[0], NativeJinjaText):
+ ran = wrap_var(NativeJinjaText(",".join(ran)))
+ else:
+ ran = wrap_var(",".join(ran))
+ except TypeError:
+ # Lookup Plugins should always return lists. Throw an error if that's not
+ # the case:
+ if not isinstance(ran, Sequence):
+ raise AnsibleError("The lookup plugin '%s' did not return a list."
+ % name)
+
+ # The TypeError we can recover from is when the value *inside* of the list
+ # is not a string
+ if len(ran) == 1:
+ ran = wrap_var(ran[0])
+ else:
+ ran = wrap_var(ran)
+ except KeyError:
+ # Lookup Plugin returned a dict. Return comma-separated string of keys
+ # for backwards compat.
+ # FIXME this can be removed when support for non-list return types is removed.
+ # See https://github.com/ansible/ansible/pull/77789
+ ran = wrap_var(",".join(ran))
+
+ return ran
+
+ def _make_undefined(self, hint=None):
+ from jinja2.runtime import Undefined
+
+ if hint is None or isinstance(hint, Undefined) or hint == '':
+ hint = "Mandatory variable has not been overridden"
+ return AnsibleUndefined(hint)
+
+ def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False,
+ convert_data=False):
+ if self.jinja2_native and not isinstance(data, string_types):
+ return data
+
+ # For preserving the number of input newlines in the output (used
+ # later in this method)
+ data_newlines = _count_newlines_from_end(data)
+
+ if fail_on_undefined is None:
+ fail_on_undefined = self._fail_on_undefined_errors
+
+ has_template_overrides = data.startswith(JINJA2_OVERRIDE)
+
+ try:
+ # NOTE Creating an overlay that lives only inside do_template means that overrides are not applied
+ # when templating nested variables in AnsibleJ2Vars where Templar.environment is used, not the overlay.
+ # This is historic behavior that is kept for backwards compatibility.
+ if overrides:
+ myenv = self.environment.overlay(overrides)
+ elif has_template_overrides:
+ myenv = self.environment.overlay()
+ else:
+ myenv = self.environment
+
+ # Get jinja env overrides from template
+ if has_template_overrides:
+ eol = data.find('\n')
+ line = data[len(JINJA2_OVERRIDE):eol]
+ data = data[eol + 1:]
+ for pair in line.split(','):
+ if ':' not in pair:
+ raise AnsibleError("failed to parse jinja2 override '%s'."
+ " Did you use something different from colon as key-value separator?" % pair.strip())
+ (key, val) = pair.split(':', 1)
+ key = key.strip()
+ setattr(myenv, key, ast.literal_eval(val.strip()))
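+
+ # e.g. a template that begins with the line
+ #   #jinja2: trim_blocks:False, lstrip_blocks:True
+ # applies those attribute overrides to this overlay environment only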
+
+ if escape_backslashes:
+ # Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
+ data = _escape_backslashes(data, myenv)
+
+ try:
+ t = myenv.from_string(data)
+ except TemplateSyntaxError as e:
+ raise AnsibleError("template error while templating string: %s. String: %s" % (to_native(e), to_native(data)), orig_exc=e)
+ except Exception as e:
+ if 'recursion' in to_native(e):
+ raise AnsibleError("recursive loop detected in template string: %s" % to_native(data), orig_exc=e)
+ else:
+ return data
+
+ if disable_lookups:
+ t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup
+
+ jvars = AnsibleJ2Vars(self, t.globals)
+
+ # In case this is a recursive call to do_template we need to
+ # save/restore cur_context to prevent overriding __UNSAFE__.
+ cached_context = self.cur_context
+
+ # In case this is a recursive call and we set different concat
+ # function up the stack, reset it in case the value of convert_data
+ # changed in this call
+ self.environment.concat = self.environment.__class__.concat
+ # the concat function is set for each Ansible environment,
+ # however for convert_data=False we need to use the concat
+ # function that avoids any evaluation and set it temporarily
+ # on the environment so it is used correctly even when
+ # the concat function is called internally in Jinja,
+ # most notably for macro execution
+ if not self.jinja2_native and not convert_data:
+ self.environment.concat = ansible_concat
+
+ self.cur_context = t.new_context(jvars, shared=True)
+ rf = t.root_render_func(self.cur_context)
+
+ try:
+ res = self.environment.concat(rf)
+ unsafe = getattr(self.cur_context, 'unsafe', False)
+ if unsafe:
+ res = wrap_var(res)
+ except TypeError as te:
+ if 'AnsibleUndefined' in to_native(te):
+ errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(data)
+ errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(te)
+ raise AnsibleUndefinedVariable(errmsg, orig_exc=te)
+ else:
+ display.debug("failing because of a type error, template data is: %s" % to_text(data))
+ raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)), orig_exc=te)
+ finally:
+ self.cur_context = cached_context
+
+ if isinstance(res, string_types) and preserve_trailing_newlines:
+ # The low level calls above do not preserve the newline
+ # characters at the end of the input data, so we
+ # calculate the difference in newlines and append them
+ # to the resulting output for parity
+ #
+ # Using Environment's keep_trailing_newline instead would
+ # result in change in behavior when trailing newlines
+ # would be kept also for included templates, for example:
+ # "Hello {% include 'world.txt' %}!" would render as
+ # "Hello world\n!\n" instead of "Hello world!\n".
+ res_newlines = _count_newlines_from_end(res)
+ if data_newlines > res_newlines:
+ res += self.environment.newline_sequence * (data_newlines - res_newlines)
+ if unsafe:
+ res = wrap_var(res)
+ return res
+ except (UndefinedError, AnsibleUndefinedVariable) as e:
+ if fail_on_undefined:
+ raise AnsibleUndefinedVariable(e, orig_exc=e)
+ else:
+ display.debug("Ignoring undefined failure: %s" % to_text(e))
+ return data
+
+ # for backwards compatibility in case anyone is using old private method directly
+ _do_template = do_template
diff --git a/lib/ansible/template/native_helpers.py b/lib/ansible/template/native_helpers.py
new file mode 100644
index 0000000..343e10c
--- /dev/null
+++ b/lib/ansible/template/native_helpers.py
@@ -0,0 +1,144 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import ast
+from itertools import islice, chain
+from types import GeneratorType
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import string_types
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible.utils.native_jinja import NativeJinjaText
+from ansible.utils.unsafe_proxy import wrap_var
+
+
+_JSON_MAP = {
+ "true": True,
+ "false": False,
+ "null": None,
+}
+
+
+class Json2Python(ast.NodeTransformer):
+ def visit_Name(self, node):
+ if node.id not in _JSON_MAP:
+ return node
+ return ast.Constant(value=_JSON_MAP[node.id])
+
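+# For example (illustrative):
+#
+#   >>> tree = ast.parse('[true, null, false]', mode='eval')
+#   >>> ast.literal_eval(ast.fix_missing_locations(Json2Python().visit(tree)))
+#   [True, None, False]
+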
+
+def ansible_eval_concat(nodes):
+ """Return a string of concatenated compiled nodes. Throw an undefined error
+ if any of the nodes is undefined.
+
+ If the result of concat appears to be a dictionary, list or bool,
+ try to convert it to such using literal_eval, the same mechanism as used
+ in jinja2_native.
+
+ Used in Templar.template() when jinja2_native=False and convert_data=True.
+ """
+ head = list(islice(nodes, 2))
+
+ if not head:
+ return ''
+
+ if len(head) == 1:
+ out = head[0]
+
+ if isinstance(out, NativeJinjaText):
+ return out
+
+ out = to_text(out)
+ else:
+ if isinstance(nodes, GeneratorType):
+ nodes = chain(head, nodes)
+ out = ''.join([to_text(v) for v in nodes])
+
+ # if this looks like a dictionary, list or bool, convert it to such
+ if out.startswith(('{', '[')) or out in ('True', 'False'):
+ unsafe = hasattr(out, '__UNSAFE__')
+ try:
+ out = ast.literal_eval(
+ ast.fix_missing_locations(
+ Json2Python().visit(
+ ast.parse(out, mode='eval')
+ )
+ )
+ )
+ except (ValueError, SyntaxError, MemoryError):
+ pass
+ else:
+ if unsafe:
+ out = wrap_var(out)
+
+ return out
+
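+# Conversion behavior sketch (illustrative):
+#
+#   >>> ansible_eval_concat(s for s in ['[1, ', '2]'])
+#   [1, 2]
+#   >>> ansible_eval_concat(s for s in ['True'])
+#   True
+#   >>> ansible_eval_concat(s for s in ['plain text'])
+#   'plain text'
+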
+
+def ansible_concat(nodes):
+ """Return a string of concatenated compiled nodes. Throw an undefined error
+ if any of the nodes is undefined. Other than that it is equivalent to
+ Jinja2's default concat function.
+
+ Used in Templar.template() when jinja2_native=False and convert_data=False.
+ """
+ return ''.join([to_text(v) for v in nodes])
+
+
+def ansible_native_concat(nodes):
+ """Return a native Python type from the list of compiled nodes. If the
+ result is a single node, its value is returned. Otherwise, the nodes are
+ concatenated as strings. If the result can be parsed with
+ :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
+ string is returned.
+
+ https://github.com/pallets/jinja/blob/master/src/jinja2/nativetypes.py
+ """
+ head = list(islice(nodes, 2))
+
+ if not head:
+ return None
+
+ if len(head) == 1:
+ out = head[0]
+
+ # TODO send unvaulted data to literal_eval?
+ if isinstance(out, AnsibleVaultEncryptedUnicode):
+ return out.data
+
+ if isinstance(out, NativeJinjaText):
+ # Sometimes (e.g. ``| string``) we need to mark variables
+ # in a special way so that they remain strings and are not
+ # passed into literal_eval.
+ # See:
+ # https://github.com/ansible/ansible/issues/70831
+ # https://github.com/pallets/jinja/issues/1200
+ # https://github.com/ansible/ansible/issues/70831#issuecomment-664190894
+ return out
+
+ # short-circuit literal_eval for anything other than strings
+ if not isinstance(out, string_types):
+ return out
+ else:
+ if isinstance(nodes, GeneratorType):
+ nodes = chain(head, nodes)
+ out = ''.join([to_text(v) for v in nodes])
+
+ try:
+ evaled = ast.literal_eval(
+ # In Python 3.10+ ast.literal_eval removes leading spaces/tabs
+ # from the given string. For backwards compatibility we need to
+ # parse the string ourselves without removing leading spaces/tabs.
+ ast.parse(out, mode='eval')
+ )
+ except (ValueError, SyntaxError, MemoryError):
+ return out
+
+ if isinstance(evaled, string_types):
+ quote = out[0]
+ return f'{quote}{evaled}{quote}'
+
+ return evaled
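+
+
+# Conversion sketch (illustrative):
+#
+#   >>> ansible_native_concat(s for s in ['42'])
+#   42
+#   >>> ansible_native_concat(s for s in ["'42'"])   # quoted strings keep their quotes
+#   "'42'"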
diff --git a/lib/ansible/template/template.py b/lib/ansible/template/template.py
new file mode 100644
index 0000000..5eb66da
--- /dev/null
+++ b/lib/ansible/template/template.py
@@ -0,0 +1,45 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from jinja2.nativetypes import NativeTemplate
+
+__all__ = ['AnsibleJ2Template']
+
+
+class AnsibleJ2Template(NativeTemplate):
+ '''
+ A helper class, which prevents Jinja2 from running AnsibleJ2Vars through dict().
+ Without this, {% include %} and similar will create new contexts unlike the special
+ one created in Templar.template. This ensures they are all alike, except for
+ potential locals.
+ '''
+
+ def new_context(self, vars=None, shared=False, locals=None):
+ if vars is None:
+ vars = dict(self.globals or ())
+
+ if isinstance(vars, dict):
+ vars = vars.copy()
+ if locals is not None:
+ vars.update(locals)
+ else:
+ vars = vars.add_locals(locals)
+ return self.environment.context_class(self.environment, vars, self.name, self.blocks)
diff --git a/lib/ansible/template/vars.py b/lib/ansible/template/vars.py
new file mode 100644
index 0000000..fd1b812
--- /dev/null
+++ b/lib/ansible/template/vars.py
@@ -0,0 +1,128 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections.abc import Mapping
+
+from jinja2.utils import missing
+
+from ansible.errors import AnsibleError, AnsibleUndefinedVariable
+from ansible.module_utils._text import to_native
+
+
+__all__ = ['AnsibleJ2Vars']
+
+
+class AnsibleJ2Vars(Mapping):
+ '''
+ Helper class to template all variable content before jinja2 sees it. This is
+ done by hijacking the variable storage that jinja2 uses, and overriding __contains__
+ and __getitem__ to look like a dict. An added bonus is avoiding the
+ duplication of the large variable hashes that injection tends to create.
+
+ To facilitate using builtin jinja2 things like range, globals are also handled here.
+ '''
+
+ def __init__(self, templar, globals, locals=None):
+ '''
+ Initializes this object with a valid Templar() object, as
+ well as several dictionaries of variables representing
+ different scopes (in jinja2 terminology).
+ '''
+
+ self._templar = templar
+ self._globals = globals
+ self._locals = dict()
+ if isinstance(locals, dict):
+ for key, val in locals.items():
+ if val is not missing:
+ if key[:2] == 'l_':
+ self._locals[key[2:]] = val
+ elif key not in ('context', 'environment', 'template'):
+ self._locals[key] = val
+
+ def __contains__(self, k):
+ if k in self._locals:
+ return True
+ if k in self._templar.available_variables:
+ return True
+ if k in self._globals:
+ return True
+ return False
+
+ def __iter__(self):
+ keys = set()
+ keys.update(self._templar.available_variables, self._locals, self._globals)
+ return iter(keys)
+
+ def __len__(self):
+ keys = set()
+ keys.update(self._templar.available_variables, self._locals, self._globals)
+ return len(keys)
+
+ def __getitem__(self, varname):
+ if varname in self._locals:
+ return self._locals[varname]
+ if varname in self._templar.available_variables:
+ variable = self._templar.available_variables[varname]
+ elif varname in self._globals:
+ return self._globals[varname]
+ else:
+ raise KeyError("undefined variable: %s" % varname)
+
+ # HostVars is special, return it as-is, as is the special variable
+ # 'vars', which contains the vars structure
+ from ansible.vars.hostvars import HostVars
+ if isinstance(variable, dict) and varname == "vars" or isinstance(variable, HostVars) or hasattr(variable, '__UNSAFE__'):
+ return variable
+ else:
+ value = None
+ try:
+ value = self._templar.template(variable)
+ except AnsibleUndefinedVariable as e:
+ # Instead of failing here prematurely, return an Undefined
+ # object which fails only after its first usage allowing us to
+ # do lazy evaluation and passing it into filters/tests that
+ # operate on such objects.
+ return self._templar.environment.undefined(
+ hint=f"{variable}: {e.message}",
+ name=varname,
+ exc=AnsibleUndefinedVariable,
+ )
+ except Exception as e:
+ msg = getattr(e, 'message', None) or to_native(e)
+ raise AnsibleError("An unhandled exception occurred while templating '%s'. "
+ "Error was a %s, original message: %s" % (to_native(variable), type(e), msg))
+
+ return value
+
+ def add_locals(self, locals):
+ '''
+ If locals are provided, create a copy of self containing those
+ locals in addition to what is already in this variable proxy.
+ '''
+ if locals is None:
+ return self
+
+        # Prior to version 2.9, locals contained all of the vars and not just the current
+        # local vars, so this was not necessary for locals to propagate down to nested includes.
+ new_locals = self._locals | locals
+
+ return AnsibleJ2Vars(self._templar, self._globals, locals=new_locals)
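+
+# Example (illustrative sketch, not part of the original file): AnsibleJ2Vars only needs a
+# Templar-like object exposing available_variables and template(), so a minimal stand-in
+# ('_FakeTemplar' below is hypothetical) is enough to show the Mapping behavior:
+#
+#   class _FakeTemplar:
+#       available_variables = {'greeting': 'hello'}
+#       def template(self, value):
+#           return value  # a real Templar would render jinja2 expressions here
+#
+#   jvars = AnsibleJ2Vars(_FakeTemplar(), globals={})
+#   assert 'greeting' in jvars and jvars['greeting'] == 'hello'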
diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py
new file mode 100644
index 0000000..ae8ccff
--- /dev/null
+++ b/lib/ansible/utils/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/utils/_junit_xml.py b/lib/ansible/utils/_junit_xml.py
new file mode 100644
index 0000000..3b95867
--- /dev/null
+++ b/lib/ansible/utils/_junit_xml.py
@@ -0,0 +1,267 @@
+"""
+Dataclasses for creating JUnit XML files.
+See: https://github.com/junit-team/junit5/blob/main/platform-tests/src/test/resources/jenkins-junit.xsd
+"""
+from __future__ import annotations
+
+import abc
+import dataclasses
+import datetime
+import decimal
+
+from xml.dom import minidom
+# noinspection PyPep8Naming
+from xml.etree import ElementTree as ET
+
+
+@dataclasses.dataclass # type: ignore[misc] # https://github.com/python/mypy/issues/5374
+class TestResult(metaclass=abc.ABCMeta):
+ """Base class for the result of a test case."""
+ output: str | None = None
+ message: str | None = None
+ type: str | None = None
+
+ def __post_init__(self):
+ if self.type is None:
+ self.type = self.tag
+
+ @property
+ @abc.abstractmethod
+ def tag(self) -> str:
+ """Tag name for the XML element created by this result type."""
+
+ def get_attributes(self) -> dict[str, str]:
+ """Return a dictionary of attributes for this instance."""
+ return _attributes(
+ message=self.message,
+ type=self.type,
+ )
+
+ def get_xml_element(self) -> ET.Element:
+ """Return an XML element representing this instance."""
+ element = ET.Element(self.tag, self.get_attributes())
+ element.text = self.output
+
+ return element
+
+
+@dataclasses.dataclass
+class TestFailure(TestResult):
+ """Failure info for a test case."""
+ @property
+ def tag(self) -> str:
+ """Tag name for the XML element created by this result type."""
+ return 'failure'
+
+
+@dataclasses.dataclass
+class TestError(TestResult):
+ """Error info for a test case."""
+ @property
+ def tag(self) -> str:
+ """Tag name for the XML element created by this result type."""
+ return 'error'
+
+
+@dataclasses.dataclass
+class TestCase:
+ """An individual test case."""
+ name: str
+ assertions: int | None = None
+ classname: str | None = None
+ status: str | None = None
+ time: decimal.Decimal | None = None
+
+ errors: list[TestError] = dataclasses.field(default_factory=list)
+ failures: list[TestFailure] = dataclasses.field(default_factory=list)
+ skipped: str | None = None
+ system_out: str | None = None
+ system_err: str | None = None
+
+ is_disabled: bool = False
+
+ @property
+ def is_failure(self) -> bool:
+ """True if the test case contains failure info."""
+ return bool(self.failures)
+
+ @property
+ def is_error(self) -> bool:
+ """True if the test case contains error info."""
+ return bool(self.errors)
+
+ @property
+ def is_skipped(self) -> bool:
+ """True if the test case was skipped."""
+ return bool(self.skipped)
+
+ def get_attributes(self) -> dict[str, str]:
+ """Return a dictionary of attributes for this instance."""
+ return _attributes(
+ assertions=self.assertions,
+ classname=self.classname,
+ name=self.name,
+ status=self.status,
+ time=self.time,
+ )
+
+ def get_xml_element(self) -> ET.Element:
+ """Return an XML element representing this instance."""
+ element = ET.Element('testcase', self.get_attributes())
+
+ if self.skipped:
+ ET.SubElement(element, 'skipped').text = self.skipped
+
+ element.extend([error.get_xml_element() for error in self.errors])
+ element.extend([failure.get_xml_element() for failure in self.failures])
+
+ if self.system_out:
+ ET.SubElement(element, 'system-out').text = self.system_out
+
+ if self.system_err:
+ ET.SubElement(element, 'system-err').text = self.system_err
+
+ return element
+
+
+@dataclasses.dataclass
+class TestSuite:
+ """A collection of test cases."""
+ name: str
+ hostname: str | None = None
+ id: str | None = None
+ package: str | None = None
+ timestamp: datetime.datetime | None = None
+
+ properties: dict[str, str] = dataclasses.field(default_factory=dict)
+ cases: list[TestCase] = dataclasses.field(default_factory=list)
+ system_out: str | None = None
+ system_err: str | None = None
+
+ @property
+ def disabled(self) -> int:
+ """The number of disabled test cases."""
+ return sum(case.is_disabled for case in self.cases)
+
+ @property
+ def errors(self) -> int:
+ """The number of test cases containing error info."""
+ return sum(case.is_error for case in self.cases)
+
+ @property
+ def failures(self) -> int:
+ """The number of test cases containing failure info."""
+ return sum(case.is_failure for case in self.cases)
+
+ @property
+ def skipped(self) -> int:
+ """The number of test cases containing skipped info."""
+ return sum(case.is_skipped for case in self.cases)
+
+ @property
+ def tests(self) -> int:
+ """The number of test cases."""
+ return len(self.cases)
+
+ @property
+ def time(self) -> decimal.Decimal:
+ """The total time from all test cases."""
+ return decimal.Decimal(sum(case.time for case in self.cases if case.time))
+
+ def get_attributes(self) -> dict[str, str]:
+ """Return a dictionary of attributes for this instance."""
+ return _attributes(
+ disabled=self.disabled,
+ errors=self.errors,
+ failures=self.failures,
+ hostname=self.hostname,
+ id=self.id,
+ name=self.name,
+ package=self.package,
+ skipped=self.skipped,
+ tests=self.tests,
+ time=self.time,
+ timestamp=self.timestamp.isoformat(timespec='seconds') if self.timestamp else None,
+ )
+
+ def get_xml_element(self) -> ET.Element:
+ """Return an XML element representing this instance."""
+ element = ET.Element('testsuite', self.get_attributes())
+
+ if self.properties:
+ ET.SubElement(element, 'properties').extend([ET.Element('property', dict(name=name, value=value)) for name, value in self.properties.items()])
+
+ element.extend([test_case.get_xml_element() for test_case in self.cases])
+
+ if self.system_out:
+ ET.SubElement(element, 'system-out').text = self.system_out
+
+ if self.system_err:
+ ET.SubElement(element, 'system-err').text = self.system_err
+
+ return element
+
+
+@dataclasses.dataclass
+class TestSuites:
+ """A collection of test suites."""
+ name: str | None = None
+
+ suites: list[TestSuite] = dataclasses.field(default_factory=list)
+
+ @property
+ def disabled(self) -> int:
+ """The number of disabled test cases."""
+ return sum(suite.disabled for suite in self.suites)
+
+ @property
+ def errors(self) -> int:
+ """The number of test cases containing error info."""
+ return sum(suite.errors for suite in self.suites)
+
+ @property
+ def failures(self) -> int:
+ """The number of test cases containing failure info."""
+ return sum(suite.failures for suite in self.suites)
+
+ @property
+ def tests(self) -> int:
+ """The number of test cases."""
+ return sum(suite.tests for suite in self.suites)
+
+ @property
+ def time(self) -> decimal.Decimal:
+ """The total time from all test cases."""
+ return decimal.Decimal(sum(suite.time for suite in self.suites))
+
+ def get_attributes(self) -> dict[str, str]:
+ """Return a dictionary of attributes for this instance."""
+ return _attributes(
+ disabled=self.disabled,
+ errors=self.errors,
+ failures=self.failures,
+ name=self.name,
+ tests=self.tests,
+ time=self.time,
+ )
+
+ def get_xml_element(self) -> ET.Element:
+ """Return an XML element representing this instance."""
+ element = ET.Element('testsuites', self.get_attributes())
+ element.extend([suite.get_xml_element() for suite in self.suites])
+
+ return element
+
+ def to_pretty_xml(self) -> str:
+ """Return a pretty formatted XML string representing this instance."""
+ return _pretty_xml(self.get_xml_element())
+
+
+def _attributes(**kwargs) -> dict[str, str]:
+ """Return the given kwargs as a dictionary with values converted to strings. Items with a value of None will be omitted."""
+ return {key: str(value) for key, value in kwargs.items() if value is not None}
+
+
+def _pretty_xml(element: ET.Element) -> str:
+ """Return a pretty formatted XML string representing the given element."""
+ return minidom.parseString(ET.tostring(element, encoding='unicode')).toprettyxml()
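+
+# Example usage (illustrative, not part of the original file), using only names defined above:
+#
+#   case = TestCase(name='test_ping', classname='smoke')
+#   case.failures.append(TestFailure(message='host unreachable'))
+#   report = TestSuites(suites=[TestSuite(name='smoke', cases=[case])])
+#   print(report.to_pretty_xml())  # nested <testsuites>/<testsuite>/<testcase> elements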
diff --git a/lib/ansible/utils/cmd_functions.py b/lib/ansible/utils/cmd_functions.py
new file mode 100644
index 0000000..d4edb2f
--- /dev/null
+++ b/lib/ansible/utils/cmd_functions.py
@@ -0,0 +1,66 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import select
+import shlex
+import subprocess
+import sys
+
+from ansible.module_utils._text import to_bytes
+
+
+def run_cmd(cmd, live=False, readsize=10):
+    """Run a shell-style command, draining stdout/stderr as the process produces them.
+
+    Returns a (returncode, stdout, stderr) tuple; when live=True, output is also
+    echoed to the caller's stdout as it arrives.
+    """
+ cmdargs = shlex.split(cmd)
+
+ # subprocess should be passed byte strings.
+ cmdargs = [to_bytes(a, errors='surrogate_or_strict') for a in cmdargs]
+
+ p = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ stdout = b''
+ stderr = b''
+ rpipes = [p.stdout, p.stderr]
+ while True:
+ rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
+
+ if p.stdout in rfd:
+ dat = os.read(p.stdout.fileno(), readsize)
+ if live:
+ sys.stdout.buffer.write(dat)
+ stdout += dat
+ if dat == b'':
+ rpipes.remove(p.stdout)
+ if p.stderr in rfd:
+ dat = os.read(p.stderr.fileno(), readsize)
+ stderr += dat
+ if live:
+ sys.stdout.buffer.write(dat)
+ if dat == b'':
+ rpipes.remove(p.stderr)
+ # only break out if we've emptied the pipes, or there is nothing to
+ # read from and the process has finished.
+ if (not rpipes or not rfd) and p.poll() is not None:
+ break
+        # calling wait() while there are still pipes to read from can deadlock
+ elif not rpipes and p.poll() is None:
+ p.wait()
+
+ return p.returncode, stdout, stderr
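+
+# Example usage (illustrative, not part of the original file):
+#
+#   rc, out, err = run_cmd('echo hello')
+#   # rc == 0, out == b'hello\n', err == b''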
diff --git a/lib/ansible/utils/collection_loader/__init__.py b/lib/ansible/utils/collection_loader/__init__.py
new file mode 100644
index 0000000..83cc246
--- /dev/null
+++ b/lib/ansible/utils/collection_loader/__init__.py
@@ -0,0 +1,26 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# CAUTION: This implementation of the collection loader is used by ansible-test.
+# Because of this, it must be compatible with all Python versions supported on the controller or remote.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# FIXME: decide what of this we want to actually be public/toplevel, put other stuff on a utility class?
+from ._collection_config import AnsibleCollectionConfig
+from ._collection_finder import AnsibleCollectionRef
+from ansible.module_utils.common.text.converters import to_text
+
+
+def resource_from_fqcr(ref):
+ """
+ Return resource from a fully-qualified collection reference,
+ or from a simple resource name.
+ For fully-qualified collection references, this is equivalent to
+ ``AnsibleCollectionRef.from_fqcr(ref).resource``.
+ :param ref: collection reference to parse
+ :return: the resource as a unicode string
+ """
+ ref = to_text(ref, errors='strict')
+ return ref.split(u'.')[-1]
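+
+# For example (illustrative): resource_from_fqcr('ns.coll.my_module') and
+# resource_from_fqcr('my_module') both return u'my_module'.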
diff --git a/lib/ansible/utils/collection_loader/_collection_config.py b/lib/ansible/utils/collection_loader/_collection_config.py
new file mode 100644
index 0000000..4f73a1a
--- /dev/null
+++ b/lib/ansible/utils/collection_loader/_collection_config.py
@@ -0,0 +1,103 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# CAUTION: This implementation of the collection loader is used by ansible-test.
+# Because of this, it must be compatible with all Python versions supported on the controller or remote.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.six import add_metaclass
+
+
+class _EventSource:
+ def __init__(self):
+ self._handlers = set()
+
+ def __iadd__(self, handler):
+ if not callable(handler):
+ raise ValueError('handler must be callable')
+ self._handlers.add(handler)
+ return self
+
+ def __isub__(self, handler):
+ try:
+ self._handlers.remove(handler)
+ except KeyError:
+ pass
+
+ return self
+
+ def _on_exception(self, handler, exc, *args, **kwargs):
+ # if we return True, we want the caller to re-raise
+ return True
+
+ def fire(self, *args, **kwargs):
+ for h in self._handlers:
+ try:
+ h(*args, **kwargs)
+ except Exception as ex:
+ if self._on_exception(h, ex, *args, **kwargs):
+ raise
+
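+# Example (illustrative, not part of the original file): handlers subscribe with += and
+# unsubscribe with -=; fire() invokes each handler and re-raises handler exceptions
+# because _on_exception returns True.
+#
+#   source = _EventSource()
+#   source += lambda collection_name: print(collection_name)
+#   source.fire(collection_name='ns.coll')
+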
+
+class _AnsibleCollectionConfig(type):
+ def __init__(cls, meta, name, bases):
+ cls._collection_finder = None
+ cls._default_collection = None
+ cls._on_collection_load = _EventSource()
+
+ @property
+ def collection_finder(cls):
+ return cls._collection_finder
+
+ @collection_finder.setter
+ def collection_finder(cls, value):
+ if cls._collection_finder:
+ raise ValueError('an AnsibleCollectionFinder has already been configured')
+
+ cls._collection_finder = value
+
+ @property
+ def collection_paths(cls):
+ cls._require_finder()
+ return [to_text(p) for p in cls._collection_finder._n_collection_paths]
+
+ @property
+ def default_collection(cls):
+ return cls._default_collection
+
+ @default_collection.setter
+ def default_collection(cls, value):
+
+ cls._default_collection = value
+
+ @property
+ def on_collection_load(cls):
+ return cls._on_collection_load
+
+ @on_collection_load.setter
+ def on_collection_load(cls, value):
+ if value is not cls._on_collection_load:
+ raise ValueError('on_collection_load is not directly settable (use +=)')
+
+ @property
+ def playbook_paths(cls):
+ cls._require_finder()
+ return [to_text(p) for p in cls._collection_finder._n_playbook_paths]
+
+ @playbook_paths.setter
+ def playbook_paths(cls, value):
+ cls._require_finder()
+ cls._collection_finder.set_playbook_paths(value)
+
+ def _require_finder(cls):
+ if not cls._collection_finder:
+ raise NotImplementedError('an AnsibleCollectionFinder has not been installed in this process')
+
+
+# concrete class of our metaclass type that defines the class properties we want
+@add_metaclass(_AnsibleCollectionConfig)
+class AnsibleCollectionConfig(object):
+ pass
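+
+# Example (illustrative, not part of the original file): because the properties live on the
+# metaclass, configuration happens on the class itself, no instance needed ('my_handler'
+# below is hypothetical):
+#
+#   AnsibleCollectionConfig.default_collection = 'ns.coll'
+#   AnsibleCollectionConfig.on_collection_load += my_handler  # direct assignment raises ValueError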
diff --git a/lib/ansible/utils/collection_loader/_collection_finder.py b/lib/ansible/utils/collection_loader/_collection_finder.py
new file mode 100644
index 0000000..d3a8765
--- /dev/null
+++ b/lib/ansible/utils/collection_loader/_collection_finder.py
@@ -0,0 +1,1161 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# CAUTION: This implementation of the collection loader is used by ansible-test.
+# Because of this, it must be compatible with all Python versions supported on the controller or remote.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import os.path
+import pkgutil
+import re
+import sys
+from keyword import iskeyword
+from tokenize import Name as _VALID_IDENTIFIER_REGEX
+
+
+# DO NOT add new non-stdlib import deps here, this loader is used by external tools (eg ansible-test import sanity)
+# that only allow stdlib and module_utils
+from ansible.module_utils.common.text.converters import to_native, to_text, to_bytes
+from ansible.module_utils.six import string_types, PY3
+from ._collection_config import AnsibleCollectionConfig
+
+from contextlib import contextmanager
+from types import ModuleType
+
+try:
+ from importlib import import_module
+except ImportError:
+ def import_module(name): # type: ignore[misc]
+ __import__(name)
+ return sys.modules[name]
+
+try:
+ from importlib import reload as reload_module
+except ImportError:
+ # 2.7 has a global reload function instead...
+ reload_module = reload # type: ignore[name-defined] # pylint:disable=undefined-variable
+
+try:
+ from importlib.util import spec_from_loader
+except ImportError:
+ pass
+
+try:
+ from importlib.machinery import FileFinder
+except ImportError:
+ HAS_FILE_FINDER = False
+else:
+ HAS_FILE_FINDER = True
+
+# NB: this supports import sanity test providing a different impl
+try:
+ from ._collection_meta import _meta_yml_to_dict
+except ImportError:
+ _meta_yml_to_dict = None
+
+
+if not hasattr(__builtins__, 'ModuleNotFoundError'):
+ # this was introduced in Python 3.6
+ ModuleNotFoundError = ImportError
+
+
+_VALID_IDENTIFIER_STRING_REGEX = re.compile(
+ ''.join((_VALID_IDENTIFIER_REGEX, r'\Z')),
+)
+
+
+try: # NOTE: py3/py2 compat
+ # py2 mypy can't deal with try/excepts
+ is_python_identifier = str.isidentifier # type: ignore[attr-defined]
+except AttributeError: # Python 2
+ def is_python_identifier(self): # type: (str) -> bool
+ """Determine whether the given string is a Python identifier."""
+ # Ref: https://stackoverflow.com/a/55802320/595220
+ return bool(re.match(_VALID_IDENTIFIER_STRING_REGEX, self))
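+
+# For example (illustrative): is_python_identifier(u'my_coll') is True, while
+# is_python_identifier(u'1bad') and is_python_identifier(u'has-dash') are False.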
+
+
+PB_EXTENSIONS = ('.yml', '.yaml')
+
+
+class _AnsibleCollectionFinder:
+ def __init__(self, paths=None, scan_sys_paths=True):
+ # TODO: accept metadata loader override
+ self._ansible_pkg_path = to_native(os.path.dirname(to_bytes(sys.modules['ansible'].__file__)))
+
+ if isinstance(paths, string_types):
+ paths = [paths]
+ elif paths is None:
+ paths = []
+
+ # expand any placeholders in configured paths
+ paths = [os.path.expanduser(to_native(p, errors='surrogate_or_strict')) for p in paths]
+
+ # add syspaths if needed
+ if scan_sys_paths:
+ paths.extend(sys.path)
+
+ good_paths = []
+        # keep only unique paths that actually contain an ansible_collections dir
+ for p in paths:
+
+ # ensure we always have ansible_collections
+ if os.path.basename(p) == 'ansible_collections':
+ p = os.path.dirname(p)
+
+ if p not in good_paths and os.path.isdir(to_bytes(os.path.join(p, 'ansible_collections'), errors='surrogate_or_strict')):
+ good_paths.append(p)
+
+ self._n_configured_paths = good_paths
+ self._n_cached_collection_paths = None
+ self._n_cached_collection_qualified_paths = None
+
+ self._n_playbook_paths = []
+
+ @classmethod
+ def _remove(cls):
+ for mps in sys.meta_path:
+ if isinstance(mps, _AnsibleCollectionFinder):
+ sys.meta_path.remove(mps)
+
+ # remove any path hooks that look like ours
+ for ph in sys.path_hooks:
+ if hasattr(ph, '__self__') and isinstance(ph.__self__, _AnsibleCollectionFinder):
+ sys.path_hooks.remove(ph)
+
+ # zap any cached path importer cache entries that might refer to us
+ sys.path_importer_cache.clear()
+
+ AnsibleCollectionConfig._collection_finder = None
+
+ # validate via the public property that we really killed it
+ if AnsibleCollectionConfig.collection_finder is not None:
+ raise AssertionError('_AnsibleCollectionFinder remove did not reset AnsibleCollectionConfig.collection_finder')
+
+ def _install(self):
+ self._remove()
+ sys.meta_path.insert(0, self)
+
+ sys.path_hooks.insert(0, self._ansible_collection_path_hook)
+
+ AnsibleCollectionConfig.collection_finder = self
+
+ def _ansible_collection_path_hook(self, path):
+ path = to_native(path)
+ interesting_paths = self._n_cached_collection_qualified_paths
+ if not interesting_paths:
+ interesting_paths = []
+ for p in self._n_collection_paths:
+ if os.path.basename(p) != 'ansible_collections':
+ p = os.path.join(p, 'ansible_collections')
+
+ if p not in interesting_paths:
+ interesting_paths.append(p)
+
+ interesting_paths.insert(0, self._ansible_pkg_path)
+ self._n_cached_collection_qualified_paths = interesting_paths
+
+ if any(path.startswith(p) for p in interesting_paths):
+ return _AnsiblePathHookFinder(self, path)
+
+ raise ImportError('not interested')
+
+ @property
+ def _n_collection_paths(self):
+ paths = self._n_cached_collection_paths
+ if not paths:
+ self._n_cached_collection_paths = paths = self._n_playbook_paths + self._n_configured_paths
+ return paths
+
+ def set_playbook_paths(self, playbook_paths):
+ if isinstance(playbook_paths, string_types):
+ playbook_paths = [playbook_paths]
+
+ # track visited paths; we have to preserve the dir order as-passed in case there are duplicate collections (first one wins)
+ added_paths = set()
+
+ # de-dupe
+ self._n_playbook_paths = [os.path.join(to_native(p), 'collections') for p in playbook_paths if not (p in added_paths or added_paths.add(p))]
+ self._n_cached_collection_paths = None
+ # HACK: playbook CLI sets this relatively late, so we've already loaded some packages whose paths might depend on this. Fix those up.
+ # NB: this should NOT be used for late additions; ideally we'd fix the playbook dir setup earlier in Ansible init
+ # to prevent this from occurring
+ for pkg in ['ansible_collections', 'ansible_collections.ansible']:
+ self._reload_hack(pkg)
+
+ def _reload_hack(self, fullname):
+ m = sys.modules.get(fullname)
+ if not m:
+ return
+ reload_module(m)
+
+ def _get_loader(self, fullname, path=None):
+ split_name = fullname.split('.')
+ toplevel_pkg = split_name[0]
+ module_to_find = split_name[-1]
+ part_count = len(split_name)
+
+ if toplevel_pkg not in ['ansible', 'ansible_collections']:
+ # not interested in anything other than ansible_collections (and limited cases under ansible)
+ return None
+
+ # sanity check what we're getting from import, canonicalize path values
+ if part_count == 1:
+ if path:
+ raise ValueError('path should not be specified for top-level packages (trying to find {0})'.format(fullname))
+ else:
+ # seed the path to the configured collection roots
+ path = self._n_collection_paths
+
+ if part_count > 1 and path is None:
+ raise ValueError('path must be specified for subpackages (trying to find {0})'.format(fullname))
+
+ if toplevel_pkg == 'ansible':
+ # something under the ansible package, delegate to our internal loader in case of redirections
+ initialize_loader = _AnsibleInternalRedirectLoader
+ elif part_count == 1:
+ initialize_loader = _AnsibleCollectionRootPkgLoader
+ elif part_count == 2: # ns pkg eg, ansible_collections, ansible_collections.somens
+ initialize_loader = _AnsibleCollectionNSPkgLoader
+ elif part_count == 3: # collection pkg eg, ansible_collections.somens.somecoll
+ initialize_loader = _AnsibleCollectionPkgLoader
+ else:
+ # anything below the collection
+ initialize_loader = _AnsibleCollectionLoader
+
+ # NB: actual "find"ing is delegated to the constructors on the various loaders; they'll ImportError if not found
+ try:
+ return initialize_loader(fullname=fullname, path_list=path)
+ except ImportError:
+ # TODO: log attempt to load context
+ return None
+
+ def find_module(self, fullname, path=None):
+ # Figure out what's being asked for, and delegate to a special-purpose loader
+ return self._get_loader(fullname, path)
+
+ def find_spec(self, fullname, path, target=None):
+ loader = self._get_loader(fullname, path)
+
+ if loader is None:
+ return None
+
+ spec = spec_from_loader(fullname, loader)
+ if spec is not None and hasattr(loader, '_subpackage_search_paths'):
+ spec.submodule_search_locations = loader._subpackage_search_paths
+ return spec
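+
+# Example (illustrative, not part of the original file): a finder is built from collection
+# roots and installed process-wide, after which imports such as
+# 'import ansible_collections.ns.coll' resolve against those roots:
+#
+#   finder = _AnsibleCollectionFinder(paths=['~/.ansible/collections'])
+#   finder._install()  # prepends itself to sys.meta_path and registers its path hook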
+
+
+# Implements a path_hook finder for iter_modules (since it's only path based). This finder does not need to actually
+# function as a finder in most cases, since our meta_path finder is consulted first for *almost* everything, except
+# pkgutil.iter_modules, and under py2, pkgutil.get_data if the parent package passed has not been loaded yet.
+class _AnsiblePathHookFinder:
+ def __init__(self, collection_finder, pathctx):
+ # when called from a path_hook, find_module doesn't usually get the path arg, so this provides our context
+ self._pathctx = to_native(pathctx)
+ self._collection_finder = collection_finder
+ if PY3:
+ # cache the native FileFinder (take advantage of its filesystem cache for future find/load requests)
+ self._file_finder = None
+
+ # class init is fun- this method has a self arg that won't get used
+ def _get_filefinder_path_hook(self=None):
+ _file_finder_hook = None
+ if PY3:
+ # try to find the FileFinder hook to call for fallback path-based imports in Py3
+ _file_finder_hook = [ph for ph in sys.path_hooks if 'FileFinder' in repr(ph)]
+ if len(_file_finder_hook) != 1:
+ raise Exception('need exactly one FileFinder import hook (found {0})'.format(len(_file_finder_hook)))
+ _file_finder_hook = _file_finder_hook[0]
+
+ return _file_finder_hook
+
+ _filefinder_path_hook = _get_filefinder_path_hook()
+
+ def _get_finder(self, fullname):
+ split_name = fullname.split('.')
+ toplevel_pkg = split_name[0]
+
+ if toplevel_pkg == 'ansible_collections':
+ # collections content? delegate to the collection finder
+ return self._collection_finder
+ else:
+            # Something else; we'd normally restrict this to `ansible` descendant modules so that any weird loader
+ # behavior that arbitrary Python modules have can be serviced by those loaders. In some dev/test
+ # scenarios (eg a venv under a collection) our path_hook signs us up to load non-Ansible things, and
+ # it's too late by the time we've reached this point, but also too expensive for the path_hook to figure
+ # out what we *shouldn't* be loading with the limited info it has. So we'll just delegate to the
+ # normal path-based loader as best we can to service it. This also allows us to take advantage of Python's
+ # built-in FS caching and byte-compilation for most things.
+ if PY3:
+ # create or consult our cached file finder for this path
+ if not self._file_finder:
+ try:
+ self._file_finder = _AnsiblePathHookFinder._filefinder_path_hook(self._pathctx)
+ except ImportError:
+ # FUTURE: log at a high logging level? This is normal for things like python36.zip on the path, but
+ # might not be in some other situation...
+ return None
+
+ return self._file_finder
+
+ # call py2's internal loader
+ return pkgutil.ImpImporter(self._pathctx)
+
+ def find_module(self, fullname, path=None):
+ # we ignore the passed in path here- use what we got from the path hook init
+ finder = self._get_finder(fullname)
+
+ if finder is None:
+ return None
+ elif HAS_FILE_FINDER and isinstance(finder, FileFinder):
+ # this codepath is erroneously used under some cases in py3,
+ # and the find_module method on FileFinder does not accept the path arg
+ # see https://github.com/pypa/setuptools/pull/2918
+ return finder.find_module(fullname)
+ else:
+ return finder.find_module(fullname, path=[self._pathctx])
+
+ def find_spec(self, fullname, target=None):
+ split_name = fullname.split('.')
+ toplevel_pkg = split_name[0]
+
+ finder = self._get_finder(fullname)
+
+ if finder is None:
+ return None
+ elif toplevel_pkg == 'ansible_collections':
+ return finder.find_spec(fullname, path=[self._pathctx])
+ else:
+ return finder.find_spec(fullname)
+
+ def iter_modules(self, prefix):
+ # NB: this currently represents only what's on disk, and does not handle package redirection
+ return _iter_modules_impl([self._pathctx], prefix)
+
+ def __repr__(self):
+ return "{0}(path='{1}')".format(self.__class__.__name__, self._pathctx)
+
+
+class _AnsibleCollectionPkgLoaderBase:
+ _allows_package_code = False
+
+ def __init__(self, fullname, path_list=None):
+ self._fullname = fullname
+ self._redirect_module = None
+ self._split_name = fullname.split('.')
+ self._rpart_name = fullname.rpartition('.')
+ self._parent_package_name = self._rpart_name[0] # eg ansible_collections for ansible_collections.somens, '' for toplevel
+ self._package_to_load = self._rpart_name[2] # eg somens for ansible_collections.somens
+
+ self._source_code_path = None
+ self._decoded_source = None
+ self._compiled_code = None
+
+ self._validate_args()
+
+ self._candidate_paths = self._get_candidate_paths([to_native(p) for p in path_list])
+ self._subpackage_search_paths = self._get_subpackage_search_paths(self._candidate_paths)
+
+ self._validate_final()
+
+ # allow subclasses to validate args and sniff split values before we start digging around
+ def _validate_args(self):
+ if self._split_name[0] != 'ansible_collections':
+ raise ImportError('this loader can only load packages from the ansible_collections package, not {0}'.format(self._fullname))
+
+ # allow subclasses to customize candidate path filtering
+ def _get_candidate_paths(self, path_list):
+ return [os.path.join(p, self._package_to_load) for p in path_list]
+
+ # allow subclasses to customize finding paths
+ def _get_subpackage_search_paths(self, candidate_paths):
+ # filter candidate paths for existence (NB: silently ignoring package init code and same-named modules)
+ return [p for p in candidate_paths if os.path.isdir(to_bytes(p))]
+
+ # allow subclasses to customize state validation/manipulation before we return the loader instance
+ def _validate_final(self):
+ return
+
+ @staticmethod
+ @contextmanager
+ def _new_or_existing_module(name, **kwargs):
+ # handle all-or-nothing sys.modules creation/use-existing/delete-on-exception-if-created behavior
+ created_module = False
+ module = sys.modules.get(name)
+ try:
+ if not module:
+ module = ModuleType(name)
+ created_module = True
+ sys.modules[name] = module
+ # always override the values passed, except name (allow reference aliasing)
+ for attr, value in kwargs.items():
+ setattr(module, attr, value)
+ yield module
+ except Exception:
+ if created_module:
+ if sys.modules.get(name):
+ sys.modules.pop(name)
+ raise
+
+ # basic module/package location support
+ # NB: this does not support distributed packages!
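+    # For example (illustrative): a package dir 'foo/' without __init__.py resolves to
+    # ('<path>/foo/__synthetic__', False, '<path>/foo'), while a plain module resolves
+    # to ('<path>/foo.py', True, None).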
+ @staticmethod
+ def _module_file_from_path(leaf_name, path):
+ has_code = True
+ package_path = os.path.join(to_native(path), to_native(leaf_name))
+ module_path = None
+
+ # if the submodule is a package, assemble valid submodule paths, but stop looking for a module
+ if os.path.isdir(to_bytes(package_path)):
+ # is there a package init?
+ module_path = os.path.join(package_path, '__init__.py')
+ if not os.path.isfile(to_bytes(module_path)):
+ module_path = os.path.join(package_path, '__synthetic__')
+ has_code = False
+ else:
+ module_path = package_path + '.py'
+ package_path = None
+ if not os.path.isfile(to_bytes(module_path)):
+ raise ImportError('{0} not found at {1}'.format(leaf_name, path))
+
+ return module_path, has_code, package_path
+
+ def exec_module(self, module):
+ # short-circuit redirect; avoid reinitializing existing modules
+ if self._redirect_module:
+ return
+
+ # execute the module's code in its namespace
+ code_obj = self.get_code(self._fullname)
+ if code_obj is not None: # things like NS packages that can't have code on disk will return None
+ exec(code_obj, module.__dict__)
+
+ def create_module(self, spec):
+ # short-circuit redirect; we've already imported the redirected module, so just alias it and return it
+ if self._redirect_module:
+ return self._redirect_module
+ else:
+ return None
+
+ def load_module(self, fullname):
+ # short-circuit redirect; we've already imported the redirected module, so just alias it and return it
+ if self._redirect_module:
+ sys.modules[self._fullname] = self._redirect_module
+ return self._redirect_module
+
+ # we're actually loading a module/package
+ module_attrs = dict(
+ __loader__=self,
+ __file__=self.get_filename(fullname),
+ __package__=self._parent_package_name # sane default for non-packages
+ )
+
+ # eg, I am a package
+ if self._subpackage_search_paths is not None: # empty is legal
+ module_attrs['__path__'] = self._subpackage_search_paths
+ module_attrs['__package__'] = fullname # per PEP366
+
+ with self._new_or_existing_module(fullname, **module_attrs) as module:
+ # execute the module's code in its namespace
+ code_obj = self.get_code(fullname)
+ if code_obj is not None: # things like NS packages that can't have code on disk will return None
+ exec(code_obj, module.__dict__)
+
+ return module
+
+ def is_package(self, fullname):
+ if fullname != self._fullname:
+ raise ValueError('this loader cannot answer is_package for {0}, only {1}'.format(fullname, self._fullname))
+ return self._subpackage_search_paths is not None
+
+ def get_source(self, fullname):
+ if self._decoded_source:
+ return self._decoded_source
+ if fullname != self._fullname:
+ raise ValueError('this loader cannot load source for {0}, only {1}'.format(fullname, self._fullname))
+ if not self._source_code_path:
+ return None
+ # FIXME: what do we want encoding/newline requirements to be?
+ self._decoded_source = self.get_data(self._source_code_path)
+ return self._decoded_source
+
+ def get_data(self, path):
+ if not path:
+ raise ValueError('a path must be specified')
+
+ # TODO: ensure we're being asked for a path below something we own
+ # TODO: try to handle redirects internally?
+
+ if not path[0] == '/':
+ # relative to current package, search package paths if possible (this may not be necessary)
+ # candidate_paths = [os.path.join(ssp, path) for ssp in self._subpackage_search_paths]
+ raise ValueError('relative resource paths not supported')
+ else:
+ candidate_paths = [path]
+
+ for p in candidate_paths:
+ b_path = to_bytes(p)
+ if os.path.isfile(b_path):
+ with open(b_path, 'rb') as fd:
+ return fd.read()
+            # HACK: if caller asks for __init__.py and the parent dir exists, return an empty string (this keeps
+            # "collection subpackages don't require __init__.py" working consistently with get_data everywhere)
+ elif b_path.endswith(b'__init__.py') and os.path.isdir(os.path.dirname(b_path)):
+ return ''
+
+ return None
+
+ def _synthetic_filename(self, fullname):
+ return '<ansible_synthetic_collection_package>'
+
+ def get_filename(self, fullname):
+ if fullname != self._fullname:
+ raise ValueError('this loader cannot find files for {0}, only {1}'.format(fullname, self._fullname))
+
+ filename = self._source_code_path
+
+ if not filename and self.is_package(fullname):
+ if len(self._subpackage_search_paths) == 1:
+ filename = os.path.join(self._subpackage_search_paths[0], '__synthetic__')
+ else:
+ filename = self._synthetic_filename(fullname)
+
+ return filename
+
+ def get_code(self, fullname):
+ if self._compiled_code:
+ return self._compiled_code
+
+ # this may or may not be an actual filename, but it's the value we'll use for __file__
+ filename = self.get_filename(fullname)
+ if not filename:
+ filename = '<string>'
+
+ source_code = self.get_source(fullname)
+
+ # for things like synthetic modules that really have no source on disk, don't return a code object at all
+ # vs things like an empty package init (which has an empty string source on disk)
+ if source_code is None:
+ return None
+
+ self._compiled_code = compile(source=source_code, filename=filename, mode='exec', flags=0, dont_inherit=True)
+
+ return self._compiled_code
+
+ def iter_modules(self, prefix):
+ return _iter_modules_impl(self._subpackage_search_paths, prefix)
+
+ def __repr__(self):
+ return '{0}(path={1})'.format(self.__class__.__name__, self._subpackage_search_paths or self._source_code_path)
+
+
+class _AnsibleCollectionRootPkgLoader(_AnsibleCollectionPkgLoaderBase):
+ def _validate_args(self):
+ super(_AnsibleCollectionRootPkgLoader, self)._validate_args()
+ if len(self._split_name) != 1:
+ raise ImportError('this loader can only load the ansible_collections toplevel package, not {0}'.format(self._fullname))
+
+
+# Implements Ansible's custom namespace package support.
+# The ansible_collections package and one level down (collections namespaces) are Python namespace packages
+# that search across all configured collection roots. The collection package (two levels down) is the first one found
+# on the configured collection root path, and Python namespace package aggregation is not allowed at or below
+# the collection. Implements implicit package (package dir) support for both Py2/3. Package init code is ignored
+# by this loader.
+class _AnsibleCollectionNSPkgLoader(_AnsibleCollectionPkgLoaderBase):
+ def _validate_args(self):
+ super(_AnsibleCollectionNSPkgLoader, self)._validate_args()
+ if len(self._split_name) != 2:
+ raise ImportError('this loader can only load collections namespace packages, not {0}'.format(self._fullname))
+
+ def _validate_final(self):
+ # special-case the `ansible` namespace, since `ansible.builtin` is magical
+ if not self._subpackage_search_paths and self._package_to_load != 'ansible':
+ raise ImportError('no {0} found in {1}'.format(self._package_to_load, self._candidate_paths))
+
+
+# handles locating the actual collection package and associated metadata
+class _AnsibleCollectionPkgLoader(_AnsibleCollectionPkgLoaderBase):
+ def _validate_args(self):
+ super(_AnsibleCollectionPkgLoader, self)._validate_args()
+ if len(self._split_name) != 3:
+ raise ImportError('this loader can only load collection packages, not {0}'.format(self._fullname))
+
+ def _validate_final(self):
+ if self._split_name[1:3] == ['ansible', 'builtin']:
+ # we don't want to allow this one to have on-disk search capability
+ self._subpackage_search_paths = []
+ elif not self._subpackage_search_paths:
+ raise ImportError('no {0} found in {1}'.format(self._package_to_load, self._candidate_paths))
+ else:
+ # only search within the first collection we found
+ self._subpackage_search_paths = [self._subpackage_search_paths[0]]
+
+ def _load_module(self, module):
+ if not _meta_yml_to_dict:
+ raise ValueError('ansible.utils.collection_loader._meta_yml_to_dict is not set')
+
+ module._collection_meta = {}
+ # TODO: load collection metadata, cache in __loader__ state
+
+ collection_name = '.'.join(self._split_name[1:3])
+
+ if collection_name == 'ansible.builtin':
+ # ansible.builtin is a synthetic collection, get its routing config from the Ansible distro
+ ansible_pkg_path = os.path.dirname(import_module('ansible').__file__)
+ metadata_path = os.path.join(ansible_pkg_path, 'config/ansible_builtin_runtime.yml')
+ with open(to_bytes(metadata_path), 'rb') as fd:
+ raw_routing = fd.read()
+ else:
+ b_routing_meta_path = to_bytes(os.path.join(module.__path__[0], 'meta/runtime.yml'))
+ if os.path.isfile(b_routing_meta_path):
+ with open(b_routing_meta_path, 'rb') as fd:
+ raw_routing = fd.read()
+ else:
+ raw_routing = ''
+ try:
+ if raw_routing:
+ routing_dict = _meta_yml_to_dict(raw_routing, (collection_name, 'runtime.yml'))
+ module._collection_meta = self._canonicalize_meta(routing_dict)
+ except Exception as ex:
+ raise ValueError('error parsing collection metadata: {0}'.format(to_native(ex)))
+
+ AnsibleCollectionConfig.on_collection_load.fire(collection_name=collection_name, collection_path=os.path.dirname(module.__file__))
+
+ return module
+
+ def exec_module(self, module):
+ super(_AnsibleCollectionPkgLoader, self).exec_module(module)
+ self._load_module(module)
+
+ def create_module(self, spec):
+ return None
+
+ def load_module(self, fullname):
+ module = super(_AnsibleCollectionPkgLoader, self).load_module(fullname)
+ return self._load_module(module)
+
+ def _canonicalize_meta(self, meta_dict):
+ # TODO: rewrite import keys and all redirect targets that start with .. (current namespace) and . (current collection)
+ # OR we could do it all on the fly?
+ # if not meta_dict:
+ # return {}
+ #
+ # ns_name = '.'.join(self._split_name[0:2])
+ # collection_name = '.'.join(self._split_name[0:3])
+ #
+ # #
+ # for routing_type, routing_type_dict in iteritems(meta_dict.get('plugin_routing', {})):
+ # for plugin_key, plugin_dict in iteritems(routing_type_dict):
+ # redirect = plugin_dict.get('redirect', '')
+ # if redirect.startswith('..'):
+ # redirect = redirect[2:]
+
+ return meta_dict
+
+
+# loads everything under a collection, including handling redirections defined by the collection
+class _AnsibleCollectionLoader(_AnsibleCollectionPkgLoaderBase):
+ # HACK: stash this in a better place
+ _redirected_package_map = {} # type: dict[str, str]
+ _allows_package_code = True
+
+ def _validate_args(self):
+ super(_AnsibleCollectionLoader, self)._validate_args()
+ if len(self._split_name) < 4:
+ raise ValueError('this loader is only for sub-collection modules/packages, not {0}'.format(self._fullname))
+
+ def _get_candidate_paths(self, path_list):
+ if len(path_list) != 1 and self._split_name[1:3] != ['ansible', 'builtin']:
+ raise ValueError('this loader requires exactly one path to search')
+
+ return path_list
+
+ def _get_subpackage_search_paths(self, candidate_paths):
+ collection_name = '.'.join(self._split_name[1:3])
+ collection_meta = _get_collection_metadata(collection_name)
+
+ # check for explicit redirection, as well as ancestor package-level redirection (only load the actual code once!)
+ redirect = None
+ explicit_redirect = False
+
+ routing_entry = _nested_dict_get(collection_meta, ['import_redirection', self._fullname])
+ if routing_entry:
+ redirect = routing_entry.get('redirect')
+
+ if redirect:
+ explicit_redirect = True
+ else:
+ redirect = _get_ancestor_redirect(self._redirected_package_map, self._fullname)
+
+ # NB: package level redirection requires hooking all future imports beneath the redirected source package
+ # in order to ensure sanity on future relative imports. We always import everything under its "real" name,
+ # then add a sys.modules entry with the redirected name using the same module instance. If we naively imported
+ # the source for each redirection, most submodules would import OK, but we'd have N runtime copies of the module
+ # (one for each name), and relative imports that ascend above the redirected package would break (since they'd
+ # see the redirected ancestor package contents instead of the package where they actually live).
+ if redirect:
+ # FIXME: wrap this so we can be explicit about a failed redirection
+ self._redirect_module = import_module(redirect)
+ if explicit_redirect and hasattr(self._redirect_module, '__path__') and self._redirect_module.__path__:
+            # if the import target looks like a package, store its name so we can rewrite future descendant loads
+ self._redirected_package_map[self._fullname] = redirect
+
+ # if we redirected, don't do any further custom package logic
+ return None
+
+ # we're not doing a redirect- try to find what we need to actually load a module/package
+
+ # this will raise ImportError if we can't find the requested module/package at all
+ if not candidate_paths:
+            # no place to look, just raise ImportError
+ raise ImportError('package has no paths')
+
+ found_path, has_code, package_path = self._module_file_from_path(self._package_to_load, candidate_paths[0])
+
+ # still here? we found something to load...
+ if has_code:
+ self._source_code_path = found_path
+
+ if package_path:
+ return [package_path] # always needs to be a list
+
+ return None
+
+
+# This loader only answers for intercepted Ansible Python modules. Normal imports will fail here and be picked up later
+# by our path_hook importer (which proxies the built-in import mechanisms, allowing normal caching etc to occur)
+class _AnsibleInternalRedirectLoader:
+ def __init__(self, fullname, path_list):
+ self._redirect = None
+
+ split_name = fullname.split('.')
+ toplevel_pkg = split_name[0]
+ module_to_load = split_name[-1]
+
+ if toplevel_pkg != 'ansible':
+ raise ImportError('not interested')
+
+ builtin_meta = _get_collection_metadata('ansible.builtin')
+
+ routing_entry = _nested_dict_get(builtin_meta, ['import_redirection', fullname])
+ if routing_entry:
+ self._redirect = routing_entry.get('redirect')
+
+ if not self._redirect:
+ raise ImportError('not redirected, go ask path_hook')
+
+ def exec_module(self, module):
+ # should never see this
+ if not self._redirect:
+ raise ValueError('no redirect found for {0}'.format(module.__spec__.name))
+
+ # Replace the module with the redirect
+ sys.modules[module.__spec__.name] = import_module(self._redirect)
+
+ def create_module(self, spec):
+ return None
+
+ def load_module(self, fullname):
+ # since we're delegating to other loaders, this should only be called for internal redirects where we answered
+ # find_module with this loader, in which case we'll just directly import the redirection target, insert it into
+ # sys.modules under the name it was requested by, and return the original module.
+
+ # should never see this
+ if not self._redirect:
+ raise ValueError('no redirect found for {0}'.format(fullname))
+
+ # FIXME: smuggle redirection context, provide warning/error that we tried and failed to redirect
+ mod = import_module(self._redirect)
+ sys.modules[fullname] = mod
+ return mod
+
+
+class AnsibleCollectionRef:
+ # FUTURE: introspect plugin loaders to get these dynamically?
+ VALID_REF_TYPES = frozenset(to_text(r) for r in ['action', 'become', 'cache', 'callback', 'cliconf', 'connection',
+ 'doc_fragments', 'filter', 'httpapi', 'inventory', 'lookup',
+ 'module_utils', 'modules', 'netconf', 'role', 'shell', 'strategy',
+ 'terminal', 'test', 'vars', 'playbook'])
+
+ # FIXME: tighten this up to match Python identifier reqs, etc
+ VALID_SUBDIRS_RE = re.compile(to_text(r'^\w+(\.\w+)*$'))
+ VALID_FQCR_RE = re.compile(to_text(r'^\w+(\.\w+){2,}$')) # can have 0-N included subdirs as well
+
+ def __init__(self, collection_name, subdirs, resource, ref_type):
+ """
+ Create an AnsibleCollectionRef from components
+ :param collection_name: a collection name of the form 'namespace.collectionname'
+ :param subdirs: optional subdir segments to be appended below the plugin type (eg, 'subdir1.subdir2')
+        :param resource: the name of the resource being referenced (eg, 'mymodule', 'someaction', 'a_role')
+ :param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment'
+ """
+ collection_name = to_text(collection_name, errors='strict')
+ if subdirs is not None:
+ subdirs = to_text(subdirs, errors='strict')
+ resource = to_text(resource, errors='strict')
+ ref_type = to_text(ref_type, errors='strict')
+
+ if not self.is_valid_collection_name(collection_name):
+ raise ValueError('invalid collection name (must be of the form namespace.collection): {0}'.format(to_native(collection_name)))
+
+ if ref_type not in self.VALID_REF_TYPES:
+ raise ValueError('invalid collection ref_type: {0}'.format(ref_type))
+
+ self.collection = collection_name
+ if subdirs:
+ if not re.match(self.VALID_SUBDIRS_RE, subdirs):
+ raise ValueError('invalid subdirs entry: {0} (must be empty/None or of the form subdir1.subdir2)'.format(to_native(subdirs)))
+ self.subdirs = subdirs
+ else:
+ self.subdirs = u''
+
+ self.resource = resource
+ self.ref_type = ref_type
+
+ package_components = [u'ansible_collections', self.collection]
+ fqcr_components = [self.collection]
+
+ self.n_python_collection_package_name = to_native('.'.join(package_components))
+
+ if self.ref_type == u'role':
+ package_components.append(u'roles')
+ elif self.ref_type == u'playbook':
+ package_components.append(u'playbooks')
+ else:
+ # we assume it's a plugin
+ package_components += [u'plugins', self.ref_type]
+
+ if self.subdirs:
+ package_components.append(self.subdirs)
+ fqcr_components.append(self.subdirs)
+
+ if self.ref_type in (u'role', u'playbook'):
+ # playbooks and roles are their own resource
+ package_components.append(self.resource)
+
+ fqcr_components.append(self.resource)
+
+ self.n_python_package_name = to_native('.'.join(package_components))
+ self._fqcr = u'.'.join(fqcr_components)
+
+ def __repr__(self):
+ return 'AnsibleCollectionRef(collection={0!r}, subdirs={1!r}, resource={2!r})'.format(self.collection, self.subdirs, self.resource)
+
+ @property
+ def fqcr(self):
+ return self._fqcr
+
+ @staticmethod
+ def from_fqcr(ref, ref_type):
+ """
+ Parse a string as a fully-qualified collection reference, raises ValueError if invalid
+ :param ref: collection reference to parse (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource')
+ :param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment'
+ :return: a populated AnsibleCollectionRef object
+ """
+ # assuming the fq_name is of the form (ns).(coll).(optional_subdir_N).(resource_name),
+ # we split the resource name off the right, split ns and coll off the left, and we're left with any optional
+ # subdirs that need to be added back below the plugin-specific subdir we'll add. So:
+ # ns.coll.resource -> ansible_collections.ns.coll.plugins.(plugintype).resource
+ # ns.coll.subdir1.resource -> ansible_collections.ns.coll.plugins.subdir1.(plugintype).resource
+ # ns.coll.rolename -> ansible_collections.ns.coll.roles.rolename
+ if not AnsibleCollectionRef.is_valid_fqcr(ref):
+ raise ValueError('{0} is not a valid collection reference'.format(to_native(ref)))
+
+ ref = to_text(ref, errors='strict')
+ ref_type = to_text(ref_type, errors='strict')
+ ext = ''
+
+ if ref_type == u'playbook' and ref.endswith(PB_EXTENSIONS):
+ resource_splitname = ref.rsplit(u'.', 2)
+ package_remnant = resource_splitname[0]
+ resource = resource_splitname[1]
+ ext = '.' + resource_splitname[2]
+ else:
+ resource_splitname = ref.rsplit(u'.', 1)
+ package_remnant = resource_splitname[0]
+ resource = resource_splitname[1]
+
+ # split the left two components of the collection package name off, anything remaining is plugin-type
+ # specific subdirs to be added back on below the plugin type
+ package_splitname = package_remnant.split(u'.', 2)
+ if len(package_splitname) == 3:
+ subdirs = package_splitname[2]
+ else:
+ subdirs = u''
+
+ collection_name = u'.'.join(package_splitname[0:2])
+
+ return AnsibleCollectionRef(collection_name, subdirs, resource + ext, ref_type)
+
+ @staticmethod
+ def try_parse_fqcr(ref, ref_type):
+ """
+ Attempt to parse a string as a fully-qualified collection reference, returning None on failure (instead of raising an error)
+ :param ref: collection reference to parse (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource')
+ :param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment'
+ :return: a populated AnsibleCollectionRef object on successful parsing, else None
+ """
+ try:
+ return AnsibleCollectionRef.from_fqcr(ref, ref_type)
+ except ValueError:
+ pass
+
+ @staticmethod
+ def legacy_plugin_dir_to_plugin_type(legacy_plugin_dir_name):
+ """
+ Utility method to convert from a PluginLoader dir name to a plugin ref_type
+ :param legacy_plugin_dir_name: PluginLoader dir name (eg, 'action_plugins', 'library')
+ :return: the corresponding plugin ref_type (eg, 'action', 'role')
+ """
+ legacy_plugin_dir_name = to_text(legacy_plugin_dir_name)
+
+        # NB: str.removesuffix is only available on py3.9+, but per the CAUTION at the top of
+        # this file it must remain usable on older controller/remote Pythons, so slice instead
+        if legacy_plugin_dir_name.endswith(u'_plugins'):
+            plugin_type = legacy_plugin_dir_name[:-len(u'_plugins')]
+        else:
+            plugin_type = legacy_plugin_dir_name
+
+ if plugin_type == u'library':
+ plugin_type = u'modules'
+
+ if plugin_type not in AnsibleCollectionRef.VALID_REF_TYPES:
+ raise ValueError('{0} cannot be mapped to a valid collection ref type'.format(to_native(legacy_plugin_dir_name)))
+
+ return plugin_type
+
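+    # For example (illustrative): legacy_plugin_dir_to_plugin_type('action_plugins')
+    # returns u'action', and the special-cased 'library' dir returns u'modules'.
+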
+ @staticmethod
+ def is_valid_fqcr(ref, ref_type=None):
+ """
+        Validates whether a string is a well-formed fully-qualified collection reference (does not look up the collection itself)
+ :param ref: candidate collection reference to validate (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource')
+ :param ref_type: optional reference type to enable deeper validation, eg 'module', 'role', 'doc_fragment'
+ :return: True if the collection ref passed is well-formed, False otherwise
+ """
+
+ ref = to_text(ref)
+
+ if not ref_type:
+ return bool(re.match(AnsibleCollectionRef.VALID_FQCR_RE, ref))
+
+ return bool(AnsibleCollectionRef.try_parse_fqcr(ref, ref_type))
+
+ @staticmethod
+ def is_valid_collection_name(collection_name):
+ """
+ Validates if the given string is a well-formed collection name (does not look up the collection itself)
+ :param collection_name: candidate collection name to validate (a valid name is of the form 'ns.collname')
+ :return: True if the collection name passed is well-formed, False otherwise
+ """
+
+ collection_name = to_text(collection_name)
+
+ if collection_name.count(u'.') != 1:
+ return False
+
+ return all(
+ # NOTE: keywords and identifiers are different in different Pythons
+ not iskeyword(ns_or_name) and is_python_identifier(ns_or_name)
+ for ns_or_name in collection_name.split(u'.')
+ )
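+
+# Example (illustrative, not part of the original file): how an FQCR maps onto the
+# synthetic Python package namespace:
+#
+#   ref = AnsibleCollectionRef.from_fqcr('ns.coll.subdir1.my_module', 'modules')
+#   # ref.collection == 'ns.coll'; ref.subdirs == 'subdir1'; ref.resource == 'my_module'
+#   # ref.n_python_package_name == 'ansible_collections.ns.coll.plugins.modules.subdir1'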
+
+
+def _get_collection_path(collection_name):
+ collection_name = to_native(collection_name)
+ if not collection_name or not isinstance(collection_name, string_types) or len(collection_name.split('.')) != 2:
+ raise ValueError('collection_name must be a non-empty string of the form namespace.collection')
+ try:
+ collection_pkg = import_module('ansible_collections.' + collection_name)
+ except ImportError:
+ raise ValueError('unable to locate collection {0}'.format(collection_name))
+
+ return to_native(os.path.dirname(to_bytes(collection_pkg.__file__)))
+
+
+def _get_collection_playbook_path(playbook):
+
+ acr = AnsibleCollectionRef.try_parse_fqcr(playbook, u'playbook')
+ if acr:
+ try:
+ # get_collection_path
+ pkg = import_module(acr.n_python_collection_package_name)
+ except (IOError, ModuleNotFoundError) as e:
+ # leaving e as debug target, even though not used in normal code
+ pkg = None
+
+ if pkg:
+            cpath = sys.modules[acr.n_python_collection_package_name].__file__.replace('__synthetic__', 'playbooks')
+
+ if acr.subdirs:
+ paths = [to_native(x) for x in acr.subdirs.split(u'.')]
+ paths.insert(0, cpath)
+ cpath = os.path.join(*paths)
+
+ path = os.path.join(cpath, to_native(acr.resource))
+ if os.path.exists(to_bytes(path)):
+ return acr.resource, path, acr.collection
+ elif not acr.resource.endswith(PB_EXTENSIONS):
+ for ext in PB_EXTENSIONS:
+ path = os.path.join(cpath, to_native(acr.resource + ext))
+ if os.path.exists(to_bytes(path)):
+ return acr.resource, path, acr.collection
+ return None
+
+
+def _get_collection_role_path(role_name, collection_list=None):
+ return _get_collection_resource_path(role_name, u'role', collection_list)
+
+
+def _get_collection_resource_path(name, ref_type, collection_list=None):
+
+ if ref_type == u'playbook':
+        # playbooks are handled a bit differently due to 'extension variance' and having no collection_list
+ return _get_collection_playbook_path(name)
+
+ acr = AnsibleCollectionRef.try_parse_fqcr(name, ref_type)
+ if acr:
+ # looks like a valid qualified collection ref; skip the collection_list
+ collection_list = [acr.collection]
+ subdirs = acr.subdirs
+ resource = acr.resource
+ elif not collection_list:
+ return None # not a FQ and no collection search list spec'd, nothing to do
+ else:
+ resource = name # treat as unqualified, loop through the collection search list to try and resolve
+ subdirs = ''
+
+ for collection_name in collection_list:
+ try:
+ acr = AnsibleCollectionRef(collection_name=collection_name, subdirs=subdirs, resource=resource, ref_type=ref_type)
+ # FIXME: error handling/logging; need to catch any import failures and move along
+ pkg = import_module(acr.n_python_package_name)
+
+ if pkg is not None:
+ # the package is now loaded, get the collection's package and ask where it lives
+ path = os.path.dirname(to_bytes(sys.modules[acr.n_python_package_name].__file__, errors='surrogate_or_strict'))
+ return resource, to_text(path, errors='surrogate_or_strict'), collection_name
+
+ except (IOError, ModuleNotFoundError) as e:
+ continue
+ except Exception as ex:
+ # FIXME: pick out typical import errors first, then error logging
+ continue
+
+ return None
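+
+ # Illustrative behavior (hypothetical names): an unqualified 'myrole' with
+ # collection_list=['ns.coll', 'other.coll'] is tried against each collection in
+ # order, while a fully-qualified 'ns.coll.myrole' bypasses the search list.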
+
+
+def _get_collection_name_from_path(path):
+ """
+ Return the containing collection name for a given path, or None if the path is not below a configured collection, or
+ the collection cannot be loaded (eg, the collection is masked by another of the same name higher in the configured
+ collection roots).
+ :param path: path to evaluate for collection containment
+ :return: collection name or None
+ """
+
+ # ensure we compare full paths since pkg path will be abspath
+ path = to_native(os.path.abspath(to_bytes(path)))
+
+ path_parts = path.split('/')
+ if path_parts.count('ansible_collections') != 1:
+ return None
+
+ ac_pos = path_parts.index('ansible_collections')
+
+ # make sure it's followed by at least a namespace and collection name
+ if len(path_parts) < ac_pos + 3:
+ return None
+
+ candidate_collection_name = '.'.join(path_parts[ac_pos + 1:ac_pos + 3])
+
+ try:
+ # we've got a name for it, now see if the path prefix matches what the loader sees
+ imported_pkg_path = to_native(os.path.dirname(to_bytes(import_module('ansible_collections.' + candidate_collection_name).__file__)))
+ except ImportError:
+ return None
+
+ # reassemble the original path prefix up to the collection name; it should match what we just imported. If it
+ # does not, this is probably a collection root that is not configured.
+
+ original_path_prefix = os.path.join('/', *path_parts[0:ac_pos + 3])
+
+ imported_pkg_path = to_native(os.path.abspath(to_bytes(imported_pkg_path)))
+ if original_path_prefix != imported_pkg_path:
+ return None
+
+ return candidate_collection_name
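+
+ # Illustrative behavior (hypothetical path, assuming 'ns.coll' is installed
+ # under a configured collection root):
+ # _get_collection_name_from_path('/roots/ansible_collections/ns/coll/plugins')
+ # -> 'ns.coll'; returns None when the prefix does not match the loaded package
+ # (eg a masked duplicate).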
+
+
+def _get_import_redirect(collection_meta_dict, fullname):
+ if not collection_meta_dict:
+ return None
+
+ return _nested_dict_get(collection_meta_dict, ['import_redirection', fullname, 'redirect'])
+
+
+def _get_ancestor_redirect(redirected_package_map, fullname):
+ # walk the requested module's ancestor packages to see if any have been previously redirected
+ cur_pkg = fullname
+ while cur_pkg:
+ cur_pkg = cur_pkg.rpartition('.')[0]
+ ancestor_redirect = redirected_package_map.get(cur_pkg)
+ if ancestor_redirect:
+ # rewrite the prefix on fullname so we import the target first, then alias it
+ redirect = ancestor_redirect + fullname[len(cur_pkg):]
+ return redirect
+ return None
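+
+ # Illustrative rewrite (hypothetical map): with
+ # redirected_package_map = {'a.b': 'x.y'}, a request for 'a.b.c' yields 'x.y.c'.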
+
+
+def _nested_dict_get(root_dict, key_list):
+ cur_value = root_dict
+ for key in key_list:
+ cur_value = cur_value.get(key)
+ if not cur_value:
+ return None
+
+ return cur_value
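+
+ # Doctest-style examples:
+ # >>> _nested_dict_get({'a': {'b': 1}}, ['a', 'b'])
+ # 1
+ # >>> _nested_dict_get({'a': {'b': 1}}, ['a', 'missing']) is None
+ # True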
+
+
+def _iter_modules_impl(paths, prefix=''):
+ # NB: this currently only iterates what's on disk; redirected modules are not considered
+ if not prefix:
+ prefix = ''
+ else:
+ prefix = to_native(prefix)
+ # yield (module_loader, name, ispkg) for each module/pkg under path
+ # TODO: implement ignore/silent catch for unreadable?
+ for b_path in map(to_bytes, paths):
+ if not os.path.isdir(b_path):
+ continue
+ for b_basename in sorted(os.listdir(b_path)):
+ b_candidate_module_path = os.path.join(b_path, b_basename)
+ if os.path.isdir(b_candidate_module_path):
+ # exclude things that obviously aren't Python package dirs
+ # FIXME: this dir is adjustable in py3.8+, check for it
+ if b'.' in b_basename or b_basename == b'__pycache__':
+ continue
+
+ # TODO: proper string handling?
+ yield prefix + to_native(b_basename), True
+ else:
+ # FIXME: match builtin ordering for package/dir/file, support compiled?
+ if b_basename.endswith(b'.py') and b_basename != b'__init__.py':
+ yield prefix + to_native(os.path.splitext(b_basename)[0]), False
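+
+ # Illustrative yields (hypothetical layout): a directory containing 'foo/' and
+ # 'bar.py' produces (prefix + 'foo', True) and (prefix + 'bar', False).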
+
+
+def _get_collection_metadata(collection_name):
+ collection_name = to_native(collection_name)
+ if not collection_name or not isinstance(collection_name, string_types) or len(collection_name.split('.')) != 2:
+ raise ValueError('collection_name must be a non-empty string of the form namespace.collection')
+
+ try:
+ collection_pkg = import_module('ansible_collections.' + collection_name)
+ except ImportError:
+ raise ValueError('unable to locate collection {0}'.format(collection_name))
+
+ _collection_meta = getattr(collection_pkg, '_collection_meta', None)
+
+ if _collection_meta is None:
+ raise ValueError('collection metadata was not loaded for collection {0}'.format(collection_name))
+
+ return _collection_meta
diff --git a/lib/ansible/utils/collection_loader/_collection_meta.py b/lib/ansible/utils/collection_loader/_collection_meta.py
new file mode 100644
index 0000000..deaac8e
--- /dev/null
+++ b/lib/ansible/utils/collection_loader/_collection_meta.py
@@ -0,0 +1,32 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# CAUTION: This implementation of the collection loader is used by ansible-test.
+# Because of this, it must be compatible with all Python versions supported on the controller or remote.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from collections.abc import Mapping
+except ImportError:
+ from collections import Mapping # type: ignore[no-redef,attr-defined] # pylint: disable=ansible-bad-import-from
+
+from ansible.module_utils.common.yaml import yaml_load
+
+
+def _meta_yml_to_dict(yaml_string_data, content_id):
+ """
+ Converts a YAML string containing a dictionary into a Python dictionary. This function may be monkeypatched with another implementation
+ by some tools (eg the import sanity test).
+ :param yaml_string_data: a bytes-ish YAML dictionary
+ :param content_id: a unique ID representing the content to allow other implementations to cache the output
+ :return: a Python dictionary representing the YAML dictionary content
+ """
+ # NB: content_id is passed in, but not used by this implementation
+ routing_dict = yaml_load(yaml_string_data)
+ if not routing_dict:
+ routing_dict = {}
+ if not isinstance(routing_dict, Mapping):
+ raise ValueError('collection metadata must be an instance of Python Mapping')
+ return routing_dict
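+
+ # Illustrative usage (hypothetical content):
+ # _meta_yml_to_dict(b'plugin_routing: {}', content_id='abc123')
+ # -> {'plugin_routing': {}}
+ # An empty document yields {}; a non-mapping document raises ValueError.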
diff --git a/lib/ansible/utils/color.py b/lib/ansible/utils/color.py
new file mode 100644
index 0000000..be8fb00
--- /dev/null
+++ b/lib/ansible/utils/color.py
@@ -0,0 +1,112 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+from ansible import constants as C
+
+ANSIBLE_COLOR = True
+if C.ANSIBLE_NOCOLOR:
+ ANSIBLE_COLOR = False
+elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
+ ANSIBLE_COLOR = False
+else:
+ try:
+ import curses
+ curses.setupterm()
+ if curses.tigetnum('colors') < 0:
+ ANSIBLE_COLOR = False
+ except ImportError:
+ # curses library was not found
+ pass
+ except curses.error:
+ # curses returns an error (e.g. could not find terminal)
+ ANSIBLE_COLOR = False
+
+if C.ANSIBLE_FORCE_COLOR:
+ ANSIBLE_COLOR = True
+
+# --- begin "pretty"
+#
+# pretty - A miniature library that provides a Python print and stdout
+# wrapper that makes colored terminal text easier to use (e.g. without
+# having to mess around with ANSI escape sequences). This code is public
+# domain - there is no license except that you must leave this header.
+#
+# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
+
+
+def parsecolor(color):
+ """SGR parameter string for the specified color name."""
+ matches = re.match(r"color(?P<color>[0-9]+)"
+ r"|(?P<rgb>rgb(?P<red>[0-5])(?P<green>[0-5])(?P<blue>[0-5]))"
+ r"|gray(?P<gray>[0-9]+)", color)
+ if not matches:
+ return C.COLOR_CODES[color]
+ if matches.group('color'):
+ return u'38;5;%d' % int(matches.group('color'))
+ if matches.group('rgb'):
+ return u'38;5;%d' % (16 + 36 * int(matches.group('red')) +
+ 6 * int(matches.group('green')) +
+ int(matches.group('blue')))
+ if matches.group('gray'):
+ return u'38;5;%d' % (232 + int(matches.group('gray')))
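+
+ # Illustrative 256-color mappings:
+ # parsecolor(u'color34') -> u'38;5;34'
+ # parsecolor(u'rgb505') -> u'38;5;201' (16 + 36*5 + 6*0 + 5)
+ # parsecolor(u'gray10') -> u'38;5;242' (232 + 10)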
+
+
+def stringc(text, color, wrap_nonvisible_chars=False):
+ """String in color."""
+
+ if ANSIBLE_COLOR:
+ color_code = parsecolor(color)
+ fmt = u"\033[%sm%s\033[0m"
+ if wrap_nonvisible_chars:
+ # This option is provided for use in cases when the
+ # formatting of a command line prompt is needed, such as
+ # `ansible-console`. As said in `readline` sources:
+ # readline/display.c:321
+ # /* Current implementation:
+ # \001 (^A) start non-visible characters
+ # \002 (^B) end non-visible characters
+ # all characters except \001 and \002 (following a \001) are copied to
+ # the returned string; all characters except those between \001 and
+ # \002 are assumed to be `visible'. */
+ fmt = u"\001\033[%sm\002%s\001\033[0m\002"
+ return u"\n".join([fmt % (color_code, t) for t in text.split(u'\n')])
+ else:
+ return text
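+
+ # Illustrative output (assuming ANSIBLE_COLOR is True and C.COLOR_CODES maps
+ # 'red' to '0;31'):
+ # stringc(u'failed', u'red') -> u'\033[0;31mfailed\033[0m'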
+
+
+def colorize(lead, num, color):
+ """ Print 'lead' = 'num' in 'color' """
+ s = u"%s=%-4s" % (lead, str(num))
+ if num != 0 and ANSIBLE_COLOR and color is not None:
+ s = stringc(s, color)
+ return s
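+
+ # Illustrative output (uncolored): colorize(u'ok', 3, None) -> u'ok=3   '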
+
+
+def hostcolor(host, stats, color=True):
+ if ANSIBLE_COLOR and color:
+ if stats['failures'] != 0 or stats['unreachable'] != 0:
+ return u"%-37s" % stringc(host, C.COLOR_ERROR)
+ elif stats['changed'] != 0:
+ return u"%-37s" % stringc(host, C.COLOR_CHANGED)
+ else:
+ return u"%-37s" % stringc(host, C.COLOR_OK)
+ return u"%-26s" % host
diff --git a/lib/ansible/utils/context_objects.py b/lib/ansible/utils/context_objects.py
new file mode 100644
index 0000000..efe15fe
--- /dev/null
+++ b/lib/ansible/utils/context_objects.py
@@ -0,0 +1,92 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+Hold command line arguments for use in other modules
+"""
+
+from abc import ABCMeta
+from collections.abc import Container, Mapping, Sequence, Set
+
+from ansible.module_utils.common.collections import ImmutableDict
+from ansible.module_utils.six import add_metaclass, binary_type, text_type
+from ansible.utils.singleton import Singleton
+
+
+def _make_immutable(obj):
+ """Recursively convert a container and objects inside of it into immutable data types"""
+ if isinstance(obj, (text_type, binary_type)):
+ # Strings first because they are also sequences
+ return obj
+ elif isinstance(obj, Mapping):
+ temp_dict = {}
+ for key, value in obj.items():
+ if isinstance(value, Container):
+ temp_dict[key] = _make_immutable(value)
+ else:
+ temp_dict[key] = value
+ return ImmutableDict(temp_dict)
+ elif isinstance(obj, Set):
+ temp_set = set()
+ for value in obj:
+ if isinstance(value, Container):
+ temp_set.add(_make_immutable(value))
+ else:
+ temp_set.add(value)
+ return frozenset(temp_set)
+ elif isinstance(obj, Sequence):
+ temp_sequence = []
+ for value in obj:
+ if isinstance(value, Container):
+ temp_sequence.append(_make_immutable(value))
+ else:
+ temp_sequence.append(value)
+ return tuple(temp_sequence)
+
+ return obj
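+
+ # Illustrative conversions (hypothetical values): {'a': [1, 2]} becomes an
+ # ImmutableDict with 'a' mapped to the tuple (1, 2); sets become frozensets;
+ # strings pass through unchanged.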
+
+
+class _ABCSingleton(Singleton, ABCMeta):
+ """
+ Combine ABCMeta based classes with Singleton based classes
+
+ Combine Singleton and ABCMeta so we have a metaclass that unambiguously knows which can override
+ the other. Useful for making new types of containers which are also Singletons.
+ """
+ pass
+
+
+class CLIArgs(ImmutableDict):
+ """
+ Hold a parsed copy of cli arguments
+
+ We have both this non-Singleton version and the Singleton, GlobalCLIArgs, version to leave us
+ room to implement a Context object in the future. Whereas there should only be one set of args
+ in a global context, individual Context objects might want to pretend that they have different
+ command line switches to trigger different behaviour when they run. So if we support Contexts
+ in the future, they would use CLIArgs instead of GlobalCLIArgs to store their version of command
+ line flags.
+ """
+ def __init__(self, mapping):
+ toplevel = {}
+ for key, value in mapping.items():
+ toplevel[key] = _make_immutable(value)
+ super(CLIArgs, self).__init__(toplevel)
+
+ @classmethod
+ def from_options(cls, options):
+ return cls(vars(options))
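+
+ # Illustrative usage (hypothetical argparse namespace):
+ # from argparse import Namespace
+ # args = CLIArgs.from_options(Namespace(check=True, tags=['web', 'db']))
+ # args['tags'] -> ('web', 'db')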
+
+
+@add_metaclass(_ABCSingleton)
+class GlobalCLIArgs(CLIArgs):
+ """
+ Globally hold a parsed copy of cli arguments.
+
+ Only one of these exist per program as it is for global context
+ """
+ pass
diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py
new file mode 100644
index 0000000..e521f2a
--- /dev/null
+++ b/lib/ansible/utils/display.py
@@ -0,0 +1,526 @@
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ctypes.util
+import fcntl
+import getpass
+import logging
+import os
+import random
+import subprocess
+import sys
+import textwrap
+import threading
+import time
+
+from struct import unpack, pack
+from termios import TIOCGWINSZ
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.six import text_type
+from ansible.utils.color import stringc
+from ansible.utils.multiprocessing import context as multiprocessing_context
+from ansible.utils.singleton import Singleton
+from ansible.utils.unsafe_proxy import wrap_var
+from functools import wraps
+
+
+_LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
+# Set argtypes, to avoid segfault if the wrong type is provided,
+# restype is assumed to be c_int
+_LIBC.wcwidth.argtypes = (ctypes.c_wchar,)
+_LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int)
+# Max for c_int
+_MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
+
+
+def get_text_width(text):
+ """Function that utilizes ``wcswidth`` or ``wcwidth`` to determine the
+ number of columns used to display a text string.
+
+ We try ``wcswidth`` first, and fall back to iterating over each
+ character and using ``wcwidth`` individually, treating non-printable
+ wide characters as zero-width.
+ """
+ if not isinstance(text, text_type):
+ raise TypeError('get_text_width requires text, not %s' % type(text))
+
+ try:
+ width = _LIBC.wcswidth(text, _MAX_INT)
+ except ctypes.ArgumentError:
+ width = -1
+ if width != -1:
+ return width
+
+ width = 0
+ counter = 0
+ for c in text:
+ counter += 1
+ if c in (u'\x08', u'\x7f', u'\x94', u'\x1b'):
+ # A few characters result in a subtraction of length:
+ # BS, DEL, CCH, ESC
+ # ESC is slightly different: it is non-printable itself, but it also
+ # introduces an escape sequence, so the sequence as a whole counts as a
+ # single non-printable length
+ width -= 1
+ counter -= 1
+ continue
+
+ try:
+ w = _LIBC.wcwidth(c)
+ except ctypes.ArgumentError:
+ w = -1
+ if w == -1:
+ # -1 signifies a non-printable character
+ # use 0 here as a best effort
+ w = 0
+ width += w
+
+ if width == 0 and counter:
+ raise EnvironmentError(
+ 'get_text_width could not calculate text width of %r' % text
+ )
+
+ # It doesn't make sense to have a negative printable width
+ return width if width >= 0 else 0
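+
+ # Illustrative widths (exact values depend on the platform's wcwidth tables):
+ # get_text_width(u'ascii') -> 5
+ # get_text_width(u'コンニチハ') -> 10 (east-asian wide characters use two columns)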
+
+
+class FilterBlackList(logging.Filter):
+ def __init__(self, blacklist):
+ self.blacklist = [logging.Filter(name) for name in blacklist]
+
+ def filter(self, record):
+ return not any(f.filter(record) for f in self.blacklist)
+
+
+class FilterUserInjector(logging.Filter):
+ """
+ This is a filter which injects the current user as the 'user' attribute on each record. We need to add this filter
+ to all logger handlers so that 3rd party libraries won't print an exception due to user not being defined.
+ """
+
+ try:
+ username = getpass.getuser()
+ except KeyError:
+ # people like to make containers w/o actual valid passwd/shadow and use host uids
+ username = 'uid=%s' % os.getuid()
+
+ def filter(self, record):
+ record.user = FilterUserInjector.username
+ return True
+
+
+logger = None
+# TODO: make this a callback event instead
+if getattr(C, 'DEFAULT_LOG_PATH'):
+ path = C.DEFAULT_LOG_PATH
+ if path and ((os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK)):
+ # NOTE: level is kept at INFO to avoid security disclosures caused by certain libraries when using DEBUG
+ logging.basicConfig(filename=path, level=logging.INFO, # DO NOT set to logging.DEBUG
+ format='%(asctime)s p=%(process)d u=%(user)s n=%(name)s | %(message)s')
+
+ logger = logging.getLogger('ansible')
+ for handler in logging.root.handlers:
+ handler.addFilter(FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
+ handler.addFilter(FilterUserInjector())
+ else:
+ print("[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n" % path, file=sys.stderr)
+
+# map color to log levels
+color_to_log_level = {C.COLOR_ERROR: logging.ERROR,
+ C.COLOR_WARN: logging.WARNING,
+ C.COLOR_OK: logging.INFO,
+ C.COLOR_SKIP: logging.WARNING,
+ C.COLOR_UNREACHABLE: logging.ERROR,
+ C.COLOR_DEBUG: logging.DEBUG,
+ C.COLOR_CHANGED: logging.INFO,
+ C.COLOR_DEPRECATE: logging.WARNING,
+ C.COLOR_VERBOSE: logging.INFO}
+
+b_COW_PATHS = (
+ b"/usr/bin/cowsay",
+ b"/usr/games/cowsay",
+ b"/usr/local/bin/cowsay", # BSD path for cowsay
+ b"/opt/local/bin/cowsay", # MacPorts path for cowsay
+)
+
+
+def _synchronize_textiowrapper(tio, lock):
+ # Ensure that a background thread can't hold the internal buffer lock on a file object
+ # during a fork, which causes forked children to hang. We're using display's existing lock for
+ # convenience (and entering the lock before a fork).
+ def _wrap_with_lock(f, lock):
+ @wraps(f)
+ def locking_wrapper(*args, **kwargs):
+ with lock:
+ return f(*args, **kwargs)
+
+ return locking_wrapper
+
+ buffer = tio.buffer
+
+ # monkeypatching the underlying file-like object isn't great, but likely safer than subclassing
+ buffer.write = _wrap_with_lock(buffer.write, lock)
+ buffer.flush = _wrap_with_lock(buffer.flush, lock)
+
+
+class Display(metaclass=Singleton):
+
+ def __init__(self, verbosity=0):
+
+ self._final_q = None
+
+ # NB: this lock is used to both prevent intermingled output between threads and to block writes during forks.
+ # Do not change the type of this lock or upgrade to a shared lock (eg multiprocessing.RLock).
+ self._lock = threading.RLock()
+
+ self.columns = None
+ self.verbosity = verbosity
+
+ # list of all deprecation messages to prevent duplicate display
+ self._deprecations = {}
+ self._warns = {}
+ self._errors = {}
+
+ self.b_cowsay = None
+ self.noncow = C.ANSIBLE_COW_SELECTION
+
+ self.set_cowsay_info()
+
+ if self.b_cowsay:
+ try:
+ cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (out, err) = cmd.communicate()
+ if cmd.returncode:
+ raise Exception
+ self.cows_available = {to_text(c) for c in out.split()} # set comprehension
+ if C.ANSIBLE_COW_ACCEPTLIST and any(C.ANSIBLE_COW_ACCEPTLIST):
+ self.cows_available = set(C.ANSIBLE_COW_ACCEPTLIST).intersection(self.cows_available)
+ except Exception:
+ # could not execute cowsay for some reason
+ self.b_cowsay = False
+
+ self._set_column_width()
+
+ try:
+ # NB: we're relying on the display singleton behavior to ensure this only runs once
+ _synchronize_textiowrapper(sys.stdout, self._lock)
+ _synchronize_textiowrapper(sys.stderr, self._lock)
+ except Exception as ex:
+ self.warning(f"failed to patch stdout/stderr for fork-safety: {ex}")
+
+ def set_queue(self, queue):
+ """Set the _final_q on Display, so that we know to proxy display over the queue
+ instead of directly writing to stdout/stderr from forks
+
+ This is only needed in ansible.executor.process.worker:WorkerProcess._run
+ """
+ if multiprocessing_context.parent_process() is None:
+ raise RuntimeError('queue cannot be set in parent process')
+ self._final_q = queue
+
+ def set_cowsay_info(self):
+ if C.ANSIBLE_NOCOWS:
+ return
+
+ if C.ANSIBLE_COW_PATH:
+ self.b_cowsay = C.ANSIBLE_COW_PATH
+ else:
+ for b_cow_path in b_COW_PATHS:
+ if os.path.exists(b_cow_path):
+ self.b_cowsay = b_cow_path
+
+ def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True):
+ """ Display a message to the user
+
+ Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
+ """
+
+ if self._final_q:
+ # If _final_q is set, that means we are in a WorkerProcess
+ # and instead of displaying messages directly from the fork
+ # we will proxy them through the queue
+ return self._final_q.send_display(msg, color=color, stderr=stderr,
+ screen_only=screen_only, log_only=log_only, newline=newline)
+
+ nocolor = msg
+
+ if not log_only:
+
+ has_newline = msg.endswith(u'\n')
+ if has_newline:
+ msg2 = msg[:-1]
+ else:
+ msg2 = msg
+
+ if color:
+ msg2 = stringc(msg2, color)
+
+ if has_newline or newline:
+ msg2 = msg2 + u'\n'
+
+ # Note: After Display() class is refactored need to update the log capture
+ # code in 'bin/ansible-connection' (and other relevant places).
+ if not stderr:
+ fileobj = sys.stdout
+ else:
+ fileobj = sys.stderr
+
+ with self._lock:
+ fileobj.write(msg2)
+
+ # With locks, and the fact that we aren't printing from forks
+ # just write, and let the system flush. Everything should come out peachy
+ # I've left this code for historical purposes, or in case we need to add this
+ # back at a later date. For now ``TaskQueueManager.cleanup`` will perform a
+ # final flush at shutdown.
+ # try:
+ # fileobj.flush()
+ # except IOError as e:
+ # # Ignore EPIPE in case fileobj has been prematurely closed, eg.
+ # # when piping to "head -n1"
+ # if e.errno != errno.EPIPE:
+ # raise
+
+ if logger and not screen_only:
+ msg2 = nocolor.lstrip('\n')
+
+ lvl = logging.INFO
+ if color:
+ # set logger level based on color (not great)
+ try:
+ lvl = color_to_log_level[color]
+ except KeyError:
+ # this should not happen, but JIC
+ raise AnsibleAssertionError('Invalid color supplied to display: %s' % color)
+ # actually log
+ logger.log(lvl, msg2)
+
+ def v(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=0)
+
+ def vv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=1)
+
+ def vvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=2)
+
+ def vvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=3)
+
+ def vvvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=4)
+
+ def vvvvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=5)
+
+ def debug(self, msg, host=None):
+ if C.DEFAULT_DEBUG:
+ if host is None:
+ self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG)
+ else:
+ self.display("%6d %0.5f [%s]: %s" % (os.getpid(), time.time(), host, msg), color=C.COLOR_DEBUG)
+
+ def verbose(self, msg, host=None, caplevel=2):
+
+ to_stderr = C.VERBOSE_TO_STDERR
+ if self.verbosity > caplevel:
+ if host is None:
+ self.display(msg, color=C.COLOR_VERBOSE, stderr=to_stderr)
+ else:
+ self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, stderr=to_stderr)
+
+ def get_deprecation_message(self, msg, version=None, removed=False, date=None, collection_name=None):
+ ''' Compose a standardized deprecation message string; the caller decides how to display it. '''
+ msg = msg.strip()
+ if msg and msg[-1] not in ['!', '?', '.']:
+ msg += '.'
+
+ if collection_name == 'ansible.builtin':
+ collection_name = 'ansible-core'
+
+ if removed:
+ header = '[DEPRECATED]: {0}'.format(msg)
+ removal_fragment = 'This feature was removed'
+ help_text = 'Please update your playbooks.'
+ else:
+ header = '[DEPRECATION WARNING]: {0}'.format(msg)
+ removal_fragment = 'This feature will be removed'
+ # FUTURE: make this a standalone warning so it only shows up once?
+ help_text = 'Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.'
+
+ if collection_name:
+ from_fragment = 'from {0}'.format(collection_name)
+ else:
+ from_fragment = ''
+
+ if date:
+ when = 'in a release after {0}.'.format(date)
+ elif version:
+ when = 'in version {0}.'.format(version)
+ else:
+ when = 'in a future release.'
+
+ message_text = ' '.join(f for f in [header, removal_fragment, from_fragment, when, help_text] if f)
+
+ return message_text
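+
+ # Illustrative message (hypothetical arguments):
+ # get_deprecation_message('Use the new option', version='2.19', collection_name='ns.coll')
+ # -> "[DEPRECATION WARNING]: Use the new option. This feature will be removed
+ # from ns.coll in version 2.19. Deprecation warnings can be disabled ..."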
+
+ def deprecated(self, msg, version=None, removed=False, date=None, collection_name=None):
+ if not removed and not C.DEPRECATION_WARNINGS:
+ return
+
+ message_text = self.get_deprecation_message(msg, version=version, removed=removed, date=date, collection_name=collection_name)
+
+ if removed:
+ raise AnsibleError(message_text)
+
+ wrapped = textwrap.wrap(message_text, self.columns, drop_whitespace=False)
+ message_text = "\n".join(wrapped) + "\n"
+
+ if message_text not in self._deprecations:
+ self.display(message_text.strip(), color=C.COLOR_DEPRECATE, stderr=True)
+ self._deprecations[message_text] = 1
+
+ def warning(self, msg, formatted=False):
+
+ if not formatted:
+ new_msg = "[WARNING]: %s" % msg
+ wrapped = textwrap.wrap(new_msg, self.columns)
+ new_msg = "\n".join(wrapped) + "\n"
+ else:
+ new_msg = "\n[WARNING]: \n%s" % msg
+
+ if new_msg not in self._warns:
+ self.display(new_msg, color=C.COLOR_WARN, stderr=True)
+ self._warns[new_msg] = 1
+
+ def system_warning(self, msg):
+ if C.SYSTEM_WARNINGS:
+ self.warning(msg)
+
+ def banner(self, msg, color=None, cows=True):
+ '''
+ Prints a header-looking line, using cowsay when available, otherwise padded with stars to the terminal width (3 stars minimum)
+ '''
+ msg = to_text(msg)
+
+ if self.b_cowsay and cows:
+ try:
+ self.banner_cowsay(msg)
+ return
+ except OSError:
+ self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
+
+ msg = msg.strip()
+ try:
+ star_len = self.columns - get_text_width(msg)
+ except EnvironmentError:
+ star_len = self.columns - len(msg)
+ if star_len <= 3:
+ star_len = 3
+ stars = u"*" * star_len
+ self.display(u"\n%s %s" % (msg, stars), color=color)
+
+ def banner_cowsay(self, msg, color=None):
+ if u": [" in msg:
+ msg = msg.replace(u"[", u"")
+ if msg.endswith(u"]"):
+ msg = msg[:-1]
+ runcmd = [self.b_cowsay, b"-W", b"60"]
+ if self.noncow:
+ thecow = self.noncow
+ if thecow == 'random':
+ thecow = random.choice(list(self.cows_available))
+ runcmd.append(b'-f')
+ runcmd.append(to_bytes(thecow))
+ runcmd.append(to_bytes(msg))
+ cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (out, err) = cmd.communicate()
+ self.display(u"%s\n" % to_text(out), color=color)
+
+ def error(self, msg, wrap_text=True):
+ if wrap_text:
+ new_msg = u"\n[ERROR]: %s" % msg
+ wrapped = textwrap.wrap(new_msg, self.columns)
+ new_msg = u"\n".join(wrapped) + u"\n"
+ else:
+ new_msg = u"ERROR! %s" % msg
+ if new_msg not in self._errors:
+ self.display(new_msg, color=C.COLOR_ERROR, stderr=True)
+ self._errors[new_msg] = 1
+
+ @staticmethod
+ def prompt(msg, private=False):
+ if private:
+ return getpass.getpass(msg)
+ else:
+ return input(msg)
+
+ def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
+
+ result = None
+ if sys.__stdin__.isatty():
+
+ do_prompt = self.prompt
+
+ if prompt and default is not None:
+ msg = "%s [%s]: " % (prompt, default)
+ elif prompt:
+ msg = "%s: " % prompt
+ else:
+ msg = 'input for %s: ' % varname
+
+ if confirm:
+ while True:
+ result = do_prompt(msg, private)
+ second = do_prompt("confirm " + msg, private)
+ if result == second:
+ break
+ self.display("***** VALUES ENTERED DO NOT MATCH ****")
+ else:
+ result = do_prompt(msg, private)
+ else:
+ result = None
+ self.warning("Not prompting as we are not in interactive mode")
+
+ # fall back to the default when no input was provided
+ if not result and default is not None:
+ result = default
+
+ if encrypt:
+ # Circular import because encrypt needs a display class
+ from ansible.utils.encrypt import do_encrypt
+ result = do_encrypt(result, encrypt, salt_size, salt)
+
+ # handle utf-8 chars
+ result = to_text(result, errors='surrogate_or_strict')
+
+ if unsafe:
+ result = wrap_var(result)
+ return result
+
+ def _set_column_width(self):
+ if os.isatty(1):
+ tty_size = unpack('HHHH', fcntl.ioctl(1, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
+ else:
+ tty_size = 0
+ self.columns = max(79, tty_size - 1)
diff --git a/lib/ansible/utils/encrypt.py b/lib/ansible/utils/encrypt.py
new file mode 100644
index 0000000..3a8642d
--- /dev/null
+++ b/lib/ansible/utils/encrypt.py
@@ -0,0 +1,272 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import multiprocessing
+import random
+import re
+import string
+import sys
+
+from collections import namedtuple
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils.six import text_type
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.utils.display import Display
+
+PASSLIB_E = CRYPT_E = None
+HAS_CRYPT = PASSLIB_AVAILABLE = False
+try:
+ import passlib
+ import passlib.hash
+ from passlib.utils.handlers import HasRawSalt, PrefixWrapper
+ try:
+ from passlib.utils.binary import bcrypt64
+ except ImportError:
+ from passlib.utils import bcrypt64
+ PASSLIB_AVAILABLE = True
+except Exception as e:
+ PASSLIB_E = e
+
+try:
+ import crypt
+ HAS_CRYPT = True
+except Exception as e:
+ CRYPT_E = e
+
+
+display = Display()
+
+__all__ = ['do_encrypt']
+
+_LOCK = multiprocessing.Lock()
+
+DEFAULT_PASSWORD_LENGTH = 20
+
+
+def random_password(length=DEFAULT_PASSWORD_LENGTH, chars=C.DEFAULT_PASSWORD_CHARS, seed=None):
+ '''Return a random password string of length containing only chars
+
+ :kwarg length: The number of characters in the new password. Defaults to 20.
+ :kwarg chars: The characters to choose from. The default is all ascii
+ letters, ascii digits, and these symbols ``.,:-_``
+ '''
+ if not isinstance(chars, text_type):
+ raise AnsibleAssertionError('%s (%s) is not a text_type' % (chars, type(chars)))
+
+ if seed is None:
+ random_generator = random.SystemRandom()
+ else:
+ random_generator = random.Random(seed)
+ return u''.join(random_generator.choice(chars) for dummy in range(length))
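+
+ # Illustrative usage: passing a seed makes the result reproducible, eg
+ # random_password(length=8, seed='x') == random_password(length=8, seed='x');
+ # the default SystemRandom generator is not reproducible.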
+
+
+def random_salt(length=8):
+ """Return a text string suitable for use as a salt for the hash functions we use to encrypt passwords.
+ """
+ # Note passlib salt values must be pure ascii so we can't let the user
+ # configure this
+ salt_chars = string.ascii_letters + string.digits + u'./'
+ return random_password(length=length, chars=salt_chars)
+
+
+class BaseHash(object):
+ algo = namedtuple('algo', ['crypt_id', 'salt_size', 'implicit_rounds', 'salt_exact', 'implicit_ident'])
+ algorithms = {
+ 'md5_crypt': algo(crypt_id='1', salt_size=8, implicit_rounds=None, salt_exact=False, implicit_ident=None),
+ 'bcrypt': algo(crypt_id='2b', salt_size=22, implicit_rounds=12, salt_exact=True, implicit_ident='2b'),
+ 'sha256_crypt': algo(crypt_id='5', salt_size=16, implicit_rounds=535000, salt_exact=False, implicit_ident=None),
+ 'sha512_crypt': algo(crypt_id='6', salt_size=16, implicit_rounds=656000, salt_exact=False, implicit_ident=None),
+ }
+
+ def __init__(self, algorithm):
+ self.algorithm = algorithm
+
+
+class CryptHash(BaseHash):
+ def __init__(self, algorithm):
+ super(CryptHash, self).__init__(algorithm)
+
+ if not HAS_CRYPT:
+ raise AnsibleError("crypt.crypt cannot be used as the 'crypt' python library is not installed or is unusable.", orig_exc=CRYPT_E)
+
+ if sys.platform.startswith('darwin'):
+ raise AnsibleError("crypt.crypt not supported on Mac OS X/Darwin, install passlib python module")
+
+ if algorithm not in self.algorithms:
+ raise AnsibleError("crypt.crypt does not support '%s' algorithm" % self.algorithm)
+
+ display.deprecated(
+ "Encryption using the Python crypt module is deprecated. The "
+ "Python crypt module is deprecated and will be removed from "
+ "Python 3.13. Install the passlib library for continued "
+ "encryption functionality.",
+ version='2.17'
+ )
+
+ self.algo_data = self.algorithms[algorithm]
+
+ def hash(self, secret, salt=None, salt_size=None, rounds=None, ident=None):
+ salt = self._salt(salt, salt_size)
+ rounds = self._rounds(rounds)
+ ident = self._ident(ident)
+ return self._hash(secret, salt, rounds, ident)
+
+ def _salt(self, salt, salt_size):
+ salt_size = salt_size or self.algo_data.salt_size
+ ret = salt or random_salt(salt_size)
+ if re.search(r'[^./0-9A-Za-z]', ret):
+ raise AnsibleError("invalid characters in salt")
+ if self.algo_data.salt_exact and len(ret) != self.algo_data.salt_size:
+ raise AnsibleError("invalid salt size")
+ elif not self.algo_data.salt_exact and len(ret) > self.algo_data.salt_size:
+ raise AnsibleError("invalid salt size")
+ return ret
+
+ def _rounds(self, rounds):
+ if rounds == self.algo_data.implicit_rounds:
+ # Passlib does not include the rounds if it is the same as implicit_rounds.
+ # Make crypt lib behave the same, by not explicitly specifying the rounds in that case.
+ return None
+ else:
+ return rounds
+
+ def _ident(self, ident):
+ if not ident:
+ return self.algo_data.crypt_id
+ if self.algorithm == 'bcrypt':
+ return ident
+ return None
+
+ def _hash(self, secret, salt, rounds, ident):
+ saltstring = ""
+ if ident:
+ saltstring = "$%s" % ident
+
+ if rounds:
+ saltstring += "$rounds=%d" % rounds
+
+ saltstring += "$%s" % salt
+
+ # crypt.crypt on Python < 3.9 returns None if it cannot parse saltstring
+ # On Python >= 3.9, it throws OSError.
+ try:
+ result = crypt.crypt(secret, saltstring)
+ orig_exc = None
+ except OSError as e:
+ result = None
+ orig_exc = e
+
+ # A result of None would be interpreted by some modules (eg the user module)
+ # as no password at all.
+ if not result:
+ raise AnsibleError(
+ "crypt.crypt does not support '%s' algorithm" % self.algorithm,
+ orig_exc=orig_exc,
+ )
+
+ return result
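+
+ # Illustrative setting strings (hypothetical salt 'mysalt'): sha512_crypt with
+ # non-implicit rounds=5000 builds '$6$rounds=5000$mysalt', while the implicit
+ # rounds (656000) are omitted, building just '$6$mysalt'.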
+
+
+class PasslibHash(BaseHash):
+ def __init__(self, algorithm):
+ super(PasslibHash, self).__init__(algorithm)
+
+ if not PASSLIB_AVAILABLE:
+ raise AnsibleError("passlib must be installed and usable to hash with '%s'" % algorithm, orig_exc=PASSLIB_E)
+
+ try:
+ self.crypt_algo = getattr(passlib.hash, algorithm)
+ except Exception:
+ raise AnsibleError("passlib does not support '%s' algorithm" % algorithm)
+
+ def hash(self, secret, salt=None, salt_size=None, rounds=None, ident=None):
+ salt = self._clean_salt(salt)
+ rounds = self._clean_rounds(rounds)
+ ident = self._clean_ident(ident)
+ return self._hash(secret, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
+
+ def _clean_ident(self, ident):
+ ret = None
+ if not ident:
+ if self.algorithm in self.algorithms:
+ return self.algorithms.get(self.algorithm).implicit_ident
+ return ret
+ if self.algorithm == 'bcrypt':
+ return ident
+ return ret
+
+ def _clean_salt(self, salt):
+ if not salt:
+ return None
+ elif issubclass(self.crypt_algo.wrapped if isinstance(self.crypt_algo, PrefixWrapper) else self.crypt_algo, HasRawSalt):
+ ret = to_bytes(salt, encoding='ascii', errors='strict')
+ else:
+ ret = to_text(salt, encoding='ascii', errors='strict')
+
+ # Ensure the salt has the correct padding
+ if self.algorithm == 'bcrypt':
+ ret = bcrypt64.repair_unused(ret)
+
+ return ret
+
+ def _clean_rounds(self, rounds):
+ algo_data = self.algorithms.get(self.algorithm)
+ if rounds:
+ return rounds
+ elif algo_data and algo_data.implicit_rounds:
+ # The default rounds used by passlib depend on the passlib version.
+ # For consistency ensure that passlib behaves the same as crypt in case no rounds were specified.
+ # Thus use the crypt defaults.
+ return algo_data.implicit_rounds
+ else:
+ return None
+
+ def _hash(self, secret, salt, salt_size, rounds, ident):
+ # Not every hash algorithm supports every parameter.
+ # Thus create the settings dict only with set parameters.
+ settings = {}
+ if salt:
+ settings['salt'] = salt
+ if salt_size:
+ settings['salt_size'] = salt_size
+ if rounds:
+ settings['rounds'] = rounds
+ if ident:
+ settings['ident'] = ident
+
+ # starting with passlib 1.7 'using' and 'hash' should be used instead of 'encrypt'
+ if hasattr(self.crypt_algo, 'hash'):
+ result = self.crypt_algo.using(**settings).hash(secret)
+ elif hasattr(self.crypt_algo, 'encrypt'):
+ result = self.crypt_algo.encrypt(secret, **settings)
+ else:
+ raise AnsibleError("installed passlib version %s not supported" % passlib.__version__)
+
+ # passlib.hash should always return something or raise an exception.
+ # Still ensure that there is always a result.
+ # Otherwise an empty password might be assumed by some modules, like the user module.
+ if not result:
+ raise AnsibleError("failed to hash with algorithm '%s'" % self.algorithm)
+
+ # Hashes from passlib.hash should be represented as ascii strings of hex
+ # digits so this should not traceback. If it's not representable as such
+ # we need to traceback and then block such algorithms because it may
+ # impact calling code.
+ return to_text(result, errors='strict')
+
+
+def passlib_or_crypt(secret, algorithm, salt=None, salt_size=None, rounds=None, ident=None):
+ if PASSLIB_AVAILABLE:
+ return PasslibHash(algorithm).hash(secret, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
+ if HAS_CRYPT:
+ return CryptHash(algorithm).hash(secret, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
+ raise AnsibleError("Unable to encrypt nor hash, either crypt or passlib must be installed.", orig_exc=CRYPT_E)
+
+
+def do_encrypt(result, encrypt, salt_size=None, salt=None, ident=None):
+ return passlib_or_crypt(result, encrypt, salt_size=salt_size, salt=salt, ident=ident)
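+
+ # Illustrative call (output shape only; the salt and digest vary per call,
+ # assuming passlib is installed):
+ # do_encrypt('secret', 'sha512_crypt') -> '$6$rounds=656000$<salt>$<digest>'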
diff --git a/lib/ansible/utils/fqcn.py b/lib/ansible/utils/fqcn.py
new file mode 100644
index 0000000..a492be1
--- /dev/null
+++ b/lib/ansible/utils/fqcn.py
@@ -0,0 +1,33 @@
+# (c) 2020, Felix Fontein <felix@fontein.de>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def add_internal_fqcns(names):
+ '''
+ Given a sequence of action/module names, returns a list containing the
+ original names plus `ansible.builtin.`- and `ansible.legacy.`-prefixed
+ variants for every name that is not already an FQCN.
+ '''
+ result = []
+ for name in names:
+ result.append(name)
+ if '.' not in name:
+ result.append('ansible.builtin.%s' % name)
+ result.append('ansible.legacy.%s' % name)
+ return result
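+
+ # Illustrative expansion:
+ # add_internal_fqcns(['ping', 'ns.coll.mod'])
+ # -> ['ping', 'ansible.builtin.ping', 'ansible.legacy.ping', 'ns.coll.mod']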
diff --git a/lib/ansible/utils/galaxy.py b/lib/ansible/utils/galaxy.py
new file mode 100644
index 0000000..bbb26fb
--- /dev/null
+++ b/lib/ansible/utils/galaxy.py
@@ -0,0 +1,107 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import tempfile
+from subprocess import Popen, PIPE
+import tarfile
+
+import ansible.constants as C
+from ansible import context
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_text, to_native
+
+
+display = Display()
+
+
+def scm_archive_collection(src, name=None, version='HEAD'):
+ return scm_archive_resource(src, scm='git', name=name, version=version, keep_scm_meta=False)
+
+
+def scm_archive_resource(src, scm='git', name=None, version='HEAD', keep_scm_meta=False):
+
+ def run_scm_cmd(cmd, tempdir):
+ try:
+ stdout = ''
+ stderr = ''
+ popen = Popen(cmd, cwd=tempdir, stdout=PIPE, stderr=PIPE)
+ stdout, stderr = popen.communicate()
+ except Exception as e:
+ ran = " ".join(cmd)
+ display.debug("ran %s:" % ran)
+ raise AnsibleError("when executing %s: %s" % (ran, to_native(e)))
+ if popen.returncode != 0:
+ raise AnsibleError("- command %s failed in directory %s (rc=%s) - %s" % (' '.join(cmd), tempdir, popen.returncode, to_native(stderr)))
+
+ if scm not in ['hg', 'git']:
+ raise AnsibleError("- scm %s is not currently supported" % scm)
+
+ try:
+ scm_path = get_bin_path(scm)
+ except (ValueError, OSError, IOError):
+ raise AnsibleError("could not find/use %s, it is required to continue with installing %s" % (scm, src))
+
+ tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
+ clone_cmd = [scm_path, 'clone']
+
+ # Add specific options for ignoring certificates if requested
+ ignore_certs = context.CLIARGS['ignore_certs']
+
+ if ignore_certs:
+ if scm == 'git':
+ clone_cmd.extend(['-c', 'http.sslVerify=false'])
+ elif scm == 'hg':
+ clone_cmd.append('--insecure')
+
+ clone_cmd.extend([src, name])
+
+ run_scm_cmd(clone_cmd, tempdir)
+
+ if scm == 'git' and version:
+ checkout_cmd = [scm_path, 'checkout', to_text(version)]
+ run_scm_cmd(checkout_cmd, os.path.join(tempdir, name))
+
+ temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar', dir=C.DEFAULT_LOCAL_TMP)
+ archive_cmd = None
+ if keep_scm_meta:
+ display.vvv('tarring %s from %s to %s' % (name, tempdir, temp_file.name))
+ with tarfile.open(temp_file.name, "w") as tar:
+ tar.add(os.path.join(tempdir, name), arcname=name)
+ elif scm == 'hg':
+ archive_cmd = [scm_path, 'archive', '--prefix', "%s/" % name]
+ if version:
+ archive_cmd.extend(['-r', version])
+ archive_cmd.append(temp_file.name)
+ elif scm == 'git':
+ archive_cmd = [scm_path, 'archive', '--prefix=%s/' % name, '--output=%s' % temp_file.name]
+ if version:
+ archive_cmd.append(version)
+ else:
+ archive_cmd.append('HEAD')
+
+ if archive_cmd is not None:
+ display.vvv('archiving %s' % archive_cmd)
+ run_scm_cmd(archive_cmd, os.path.join(tempdir, name))
+
+ return temp_file.name
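+
+ # Illustrative call (hypothetical repository):
+ # scm_archive_resource('https://example.com/repo.git', scm='git', name='myrole', version='v1.0')
+ # clones the repo, checks out 'v1.0' and returns the path of a temporary .tar archive.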
diff --git a/lib/ansible/utils/hashing.py b/lib/ansible/utils/hashing.py
new file mode 100644
index 0000000..71300d6
--- /dev/null
+++ b/lib/ansible/utils/hashing.py
@@ -0,0 +1,89 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from hashlib import sha1
+
+try:
+ from hashlib import md5 as _md5
+except ImportError:
+ # Assume we're running in FIPS mode here
+ _md5 = None
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes
+
+
+def secure_hash_s(data, hash_func=sha1):
+ ''' Return a secure hash hex digest of data. '''
+
+ digest = hash_func()
+ data = to_bytes(data, errors='surrogate_or_strict')
+ digest.update(data)
+ return digest.hexdigest()
+
+
+def secure_hash(filename, hash_func=sha1):
+ ''' Return a secure hash hex digest of local file, None if file is not present or a directory. '''
+
+ if not os.path.exists(to_bytes(filename, errors='surrogate_or_strict')) or os.path.isdir(to_bytes(filename, errors='strict')):
+ return None
+ digest = hash_func()
+ blocksize = 64 * 1024
+ try:
+ infile = open(to_bytes(filename, errors='surrogate_or_strict'), 'rb')
+ block = infile.read(blocksize)
+ while block:
+ digest.update(block)
+ block = infile.read(blocksize)
+ infile.close()
+ except IOError as e:
+ raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
+ return digest.hexdigest()
+
+
+# The checksum algorithm must match the one used in the ShellModule.checksum() method
+checksum = secure_hash
+checksum_s = secure_hash_s
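+
+ # Illustrative usage: checksum_s(u'data') returns the 40-character sha1
+ # hexdigest of the UTF-8 encoded bytes; checksum('/path/file') hashes file
+ # contents in 64 KiB blocks.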
+
+
+#
+# Backwards compat functions. Some modules include md5s in their return values.
+# Continue to support that for now. As of ansible-1.8, all of those modules
+# should also return "checksum" (sha1 for now)
+# Do not use md5 unless it is needed for:
+# 1) Optional backwards compatibility
+# 2) Compliance with a third party protocol
+#
+# MD5 will not work on systems which are FIPS-140-2 compliant.
+#
+
+def md5s(data):
+ if not _md5:
+ raise ValueError('MD5 not available. Possibly running in FIPS mode')
+ return secure_hash_s(data, _md5)
+
+
+def md5(filename):
+ if not _md5:
+ raise ValueError('MD5 not available. Possibly running in FIPS mode')
+ return secure_hash(filename, _md5)
diff --git a/lib/ansible/utils/helpers.py b/lib/ansible/utils/helpers.py
new file mode 100644
index 0000000..658ad99
--- /dev/null
+++ b/lib/ansible/utils/helpers.py
@@ -0,0 +1,51 @@
+# (c) 2016, Ansible by Red Hat <info@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+
+
+def pct_to_int(value, num_items, min_value=1):
+ '''
+ Converts a given value to a percentage if specified as "x%",
+ otherwise converts the given value to an integer.
+ '''
+ if isinstance(value, string_types) and value.endswith('%'):
+ value_pct = int(value.replace("%", ""))
+ return int((value_pct / 100.0) * num_items) or min_value
+ else:
+ return int(value)
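+
+ # Illustrative conversions:
+ # pct_to_int('30%', 10) -> 3
+ # pct_to_int('1%', 10) -> 1 (clamped up to min_value)
+ # pct_to_int(5, 10) -> 5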
+
+
+def object_to_dict(obj, exclude=None):
+ """
+ Converts an object into a dict making the properties into keys, allows excluding certain keys
+ """
+ if exclude is None or not isinstance(exclude, list):
+ exclude = []
+ return dict((key, getattr(obj, key)) for key in dir(obj) if not (key.startswith('_') or key in exclude))
+
+
+def deduplicate_list(original_list):
+ """
+ Creates a deduplicated list with the order in which each item is first found.
+ """
+ seen = set()
+ return [x for x in original_list if x not in seen and not seen.add(x)]
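+
+ # Illustrative usage: deduplicate_list([3, 1, 3, 2, 1]) -> [3, 1, 2]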
diff --git a/lib/ansible/utils/jsonrpc.py b/lib/ansible/utils/jsonrpc.py
new file mode 100644
index 0000000..8d5b0f6
--- /dev/null
+++ b/lib/ansible/utils/jsonrpc.py
@@ -0,0 +1,113 @@
+# (c) 2017, Peter Sprygada <psprygad@redhat.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pickle
+import traceback
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.connection import ConnectionError
+from ansible.module_utils.six import binary_type, text_type
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class JsonRpcServer(object):
+
+ _objects = set() # type: set[object]
+
+ def handle_request(self, request):
+ request = json.loads(to_text(request, errors='surrogate_then_replace'))
+
+ method = request.get('method')
+
+ if method.startswith('rpc.') or method.startswith('_'):
+ error = self.invalid_request()
+ return json.dumps(error)
+
+ args, kwargs = request.get('params')
+ setattr(self, '_identifier', request.get('id'))
+
+ rpc_method = None
+ for obj in self._objects:
+ rpc_method = getattr(obj, method, None)
+ if rpc_method:
+ break
+
+ if not rpc_method:
+ error = self.method_not_found()
+ response = json.dumps(error)
+ else:
+ try:
+ result = rpc_method(*args, **kwargs)
+ except ConnectionError as exc:
+ display.vvv(traceback.format_exc())
+ try:
+ error = self.error(code=exc.code, message=to_text(exc))
+ except AttributeError:
+ error = self.internal_error(data=to_text(exc))
+ response = json.dumps(error)
+ except Exception as exc:
+ display.vvv(traceback.format_exc())
+ error = self.internal_error(data=to_text(exc, errors='surrogate_then_replace'))
+ response = json.dumps(error)
+ else:
+ if isinstance(result, dict) and 'jsonrpc' in result:
+ response = result
+ else:
+ response = self.response(result)
+
+ try:
+ response = json.dumps(response)
+ except Exception as exc:
+ display.vvv(traceback.format_exc())
+ error = self.internal_error(data=to_text(exc, errors='surrogate_then_replace'))
+ response = json.dumps(error)
+
+ delattr(self, '_identifier')
+
+ return response
+
+ def register(self, obj):
+ self._objects.add(obj)
+
+ def header(self):
+ return {'jsonrpc': '2.0', 'id': self._identifier}
+
+ def response(self, result=None):
+ response = self.header()
+ if isinstance(result, binary_type):
+ result = to_text(result)
+ if not isinstance(result, text_type):
+ response["result_type"] = "pickle"
+ result = to_text(pickle.dumps(result, protocol=0))
+ response['result'] = result
+ return response
+
+ def error(self, code, message, data=None):
+ response = self.header()
+ error = {'code': code, 'message': message}
+ if data:
+ error['data'] = data
+ response['error'] = error
+ return response
+
+ # json-rpc standard errors (-32768 .. -32000)
+ def parse_error(self, data=None):
+ return self.error(-32700, 'Parse error', data)
+
+ def method_not_found(self, data=None):
+ return self.error(-32601, 'Method not found', data)
+
+ def invalid_request(self, data=None):
+ return self.error(-32600, 'Invalid request', data)
+
+ def invalid_params(self, data=None):
+ return self.error(-32602, 'Invalid params', data)
+
+ def internal_error(self, data=None):
+ return self.error(-32603, 'Internal error', data)
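+
+ # Illustrative exchange (hypothetical registered method 'ping' returning 'pong'):
+ # request: '{"jsonrpc": "2.0", "method": "ping", "params": [[], {}], "id": 1}'
+ # response: '{"jsonrpc": "2.0", "id": 1, "result": "pong"}'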
diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py
new file mode 100644
index 0000000..0e6a872
--- /dev/null
+++ b/lib/ansible/utils/listify.py
@@ -0,0 +1,46 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections.abc import Iterable
+
+from ansible.module_utils.six import string_types
+from ansible.utils.display import Display
+
+display = Display()
+
+__all__ = ['listify_lookup_plugin_terms']
+
+
+def listify_lookup_plugin_terms(terms, templar, loader=None, fail_on_undefined=True, convert_bare=False):
+
+ if loader is not None:
+ display.deprecated('"listify_lookup_plugin_terms" does not use "dataloader" anymore, the ability to pass it in will be removed in future versions.',
+ version='2.18')
+
+ if isinstance(terms, string_types):
+ terms = templar.template(terms.strip(), convert_bare=convert_bare, fail_on_undefined=fail_on_undefined)
+ else:
+ terms = templar.template(terms, fail_on_undefined=fail_on_undefined)
+
+ if isinstance(terms, string_types) or not isinstance(terms, Iterable):
+ terms = [terms]
+
+ return terms
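+
+ # Illustrative behavior (hypothetical templar): a scalar template result such
+ # as u'a' is wrapped as [u'a'], while an already-iterable result (eg a list)
+ # passes through unchanged.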
diff --git a/lib/ansible/utils/lock.py b/lib/ansible/utils/lock.py
new file mode 100644
index 0000000..34387dc
--- /dev/null
+++ b/lib/ansible/utils/lock.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2020 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from functools import wraps
+
+
+def lock_decorator(attr='missing_lock_attr', lock=None):
+ '''This decorator is a generic implementation that allows you
+ to either use a pre-defined instance attribute as the location
+ of the lock, or to explicitly pass a lock object.
+
+ This code was implemented with ``threading.Lock`` in mind, but
+ may work with other locks, assuming that they function as
+ context managers.
+
+ When using ``attr``, the assumption is that the first argument to
+ the wrapped method is ``self`` or ``cls``.
+
+ Examples:
+
+ @lock_decorator(attr='_callback_lock')
+ def send_callback(...):
+
+ @lock_decorator(lock=threading.Lock())
+ def some_method(...):
+ '''
+ def outer(func):
+ @wraps(func)
+ def inner(*args, **kwargs):
+ # Python2 doesn't have ``nonlocal``
+ # assign the actual lock to ``_lock``
+ if lock is None:
+ _lock = getattr(args[0], attr)
+ else:
+ _lock = lock
+ with _lock:
+ return func(*args, **kwargs)
+ return inner
+ return outer
diff --git a/lib/ansible/utils/multiprocessing.py b/lib/ansible/utils/multiprocessing.py
new file mode 100644
index 0000000..2912f71
--- /dev/null
+++ b/lib/ansible/utils/multiprocessing.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2019 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import multiprocessing
+
+# Explicit multiprocessing context using the fork start method
+# This exists as a compat layer now that Python3.8 has changed the default
+# start method for macOS to ``spawn`` which is incompatible with our
+# code base currently
+#
+# This exists in utils to allow it to be easily imported into various places
+# without causing circular import or dependency problems
+context = multiprocessing.get_context('fork')
diff --git a/lib/ansible/utils/native_jinja.py b/lib/ansible/utils/native_jinja.py
new file mode 100644
index 0000000..53ef140
--- /dev/null
+++ b/lib/ansible/utils/native_jinja.py
@@ -0,0 +1,13 @@
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.module_utils.six import text_type
+
+
+class NativeJinjaText(text_type):
+ pass
diff --git a/lib/ansible/utils/path.py b/lib/ansible/utils/path.py
new file mode 100644
index 0000000..f876add
--- /dev/null
+++ b/lib/ansible/utils/path.py
@@ -0,0 +1,161 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import shutil
+
+from errno import EEXIST
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+__all__ = ['unfrackpath', 'makedirs_safe']
+
+
+def unfrackpath(path, follow=True, basedir=None):
+ '''
+ Returns a path that is free of symlinks (if follow=True), environment variables, relative path traversals and symbols (~)
+
+ :arg path: A byte or text string representing a path to be canonicalized
+    :arg follow: A boolean to indicate whether symlinks should be resolved or not
+ :raises UnicodeDecodeError: If the canonicalized version of the path
+ contains non-utf8 byte sequences.
+    :rtype: A text string (unicode on python2, str on python3).
+ :returns: An absolute path with symlinks, environment variables, and tilde
+ expanded. Note that this does not check whether a path exists.
+
+ example::
+ '$HOME/../../var/mail' becomes '/var/spool/mail'
+ '''
+
+ b_basedir = to_bytes(basedir, errors='surrogate_or_strict', nonstring='passthru')
+
+ if b_basedir is None:
+ b_basedir = to_bytes(os.getcwd(), errors='surrogate_or_strict')
+ elif os.path.isfile(b_basedir):
+ b_basedir = os.path.dirname(b_basedir)
+
+ b_final_path = os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))
+
+ if not os.path.isabs(b_final_path):
+ b_final_path = os.path.join(b_basedir, b_final_path)
+
+ if follow:
+ b_final_path = os.path.realpath(b_final_path)
+
+ return to_text(os.path.normpath(b_final_path), errors='surrogate_or_strict')
+
+
+def makedirs_safe(path, mode=None):
+ '''
+ A *potentially insecure* way to ensure the existence of a directory chain. The "safe" in this function's name
+ refers only to its ability to ignore `EEXIST` in the case of multiple callers operating on the same part of
+ the directory chain. This function is not safe to use under world-writable locations when the first level of the
+ path to be created contains a predictable component. Always create a randomly-named element first if there is any
+ chance the parent directory might be world-writable (eg, /tmp) to prevent symlink hijacking and potential
+ disclosure or modification of sensitive file contents.
+
+ :arg path: A byte or text string representing a directory chain to be created
+ :kwarg mode: If given, the mode to set the directory to
+ :raises AnsibleError: If the directory cannot be created and does not already exist.
+ :raises UnicodeDecodeError: if the path is not decodable in the utf-8 encoding.
+ '''
+
+ rpath = unfrackpath(path)
+ b_rpath = to_bytes(rpath)
+ if not os.path.exists(b_rpath):
+ try:
+ if mode:
+ os.makedirs(b_rpath, mode)
+ else:
+ os.makedirs(b_rpath)
+ except OSError as e:
+ if e.errno != EEXIST:
+ raise AnsibleError("Unable to create local directories(%s): %s" % (to_native(rpath), to_native(e)))
+
+
+def basedir(source):
+ """ returns directory for inventory or playbook """
+ source = to_bytes(source, errors='surrogate_or_strict')
+ dname = None
+ if os.path.isdir(source):
+ dname = source
+ elif source in [None, '', '.']:
+ dname = os.getcwd()
+ elif os.path.isfile(source):
+ dname = os.path.dirname(source)
+
+ if dname:
+ # don't follow symlinks for basedir, enables source re-use
+ dname = os.path.abspath(dname)
+
+ return to_text(dname, errors='surrogate_or_strict')
+
+
+def cleanup_tmp_file(path, warn=False):
+ """
+    Removes a temporary file or directory. Optionally displays a warning if
+    the file or directory cannot be removed.
+
+ :arg path: Path to file or directory to be removed
+ :kwarg warn: Whether or not to display a warning when the file or directory
+ cannot be removed
+ """
+ try:
+ if os.path.exists(path):
+ try:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ elif os.path.isfile(path):
+ os.unlink(path)
+ except Exception as e:
+ if warn:
+ # Importing here to avoid circular import
+ from ansible.utils.display import Display
+ display = Display()
+ display.display(u'Unable to remove temporary file {0}'.format(to_text(e)))
+ except Exception:
+ pass
+
+
+def is_subpath(child, parent, real=False):
+ """
+ Compares paths to check if one is contained in the other
+    :arg child: Path to test
+    :arg parent: Path to test against
+ """
+ test = False
+
+ abs_child = unfrackpath(child, follow=False)
+ abs_parent = unfrackpath(parent, follow=False)
+
+ if real:
+ abs_child = os.path.realpath(abs_child)
+ abs_parent = os.path.realpath(abs_parent)
+
+ c = abs_child.split(os.path.sep)
+ p = abs_parent.split(os.path.sep)
+
+ try:
+ test = c[:len(p)] == p
+ except IndexError:
+ # child is shorter than parent so cannot be subpath
+ pass
+
+ return test
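A short illustrative sketch of the helpers above (paths and results depend on the local filesystem):

    from ansible.utils.path import is_subpath, makedirs_safe, unfrackpath

    unfrackpath('$HOME/../../var/mail')               # e.g. '/var/mail'; symlinks resolved when follow=True
    is_subpath('/etc/ansible/hosts', '/etc/ansible')  # True
    is_subpath('/etc/passwd', '/etc/ansible')         # False
    makedirs_safe('~/.ansible/tmp/demo', mode=0o700)  # note the world-writable-dir caveat above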
diff --git a/lib/ansible/utils/plugin_docs.py b/lib/ansible/utils/plugin_docs.py
new file mode 100644
index 0000000..3af2678
--- /dev/null
+++ b/lib/ansible/utils/plugin_docs.py
@@ -0,0 +1,351 @@
+# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections.abc import MutableMapping, MutableSet, MutableSequence
+from pathlib import Path
+
+from ansible import constants as C
+from ansible.release import __version__ as ansible_version
+from ansible.errors import AnsibleError, AnsibleParserError, AnsiblePluginNotFound
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+from ansible.parsing.plugin_docs import read_docstring
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def merge_fragment(target, source):
+
+ for key, value in source.items():
+ if key in target:
+ # assumes both structures have same type
+ if isinstance(target[key], MutableMapping):
+ value.update(target[key])
+ elif isinstance(target[key], MutableSet):
+ value.add(target[key])
+ elif isinstance(target[key], MutableSequence):
+ value = sorted(frozenset(value + target[key]))
+ else:
+ raise Exception("Attempt to extend a documentation fragment, invalid type for %s" % key)
+ target[key] = value
+
+
+def _process_versions_and_dates(fragment, is_module, return_docs, callback):
+ def process_deprecation(deprecation, top_level=False):
+ collection_name = 'removed_from_collection' if top_level else 'collection_name'
+ if not isinstance(deprecation, MutableMapping):
+ return
+ if (is_module or top_level) and 'removed_in' in deprecation: # used in module deprecations
+ callback(deprecation, 'removed_in', collection_name)
+ if 'removed_at_date' in deprecation:
+ callback(deprecation, 'removed_at_date', collection_name)
+ if not (is_module or top_level) and 'version' in deprecation: # used in plugin option deprecations
+ callback(deprecation, 'version', collection_name)
+
+ def process_option_specifiers(specifiers):
+ for specifier in specifiers:
+ if not isinstance(specifier, MutableMapping):
+ continue
+ if 'version_added' in specifier:
+ callback(specifier, 'version_added', 'version_added_collection')
+ if isinstance(specifier.get('deprecated'), MutableMapping):
+ process_deprecation(specifier['deprecated'])
+
+ def process_options(options):
+ for option in options.values():
+ if not isinstance(option, MutableMapping):
+ continue
+ if 'version_added' in option:
+ callback(option, 'version_added', 'version_added_collection')
+ if not is_module:
+ if isinstance(option.get('env'), list):
+ process_option_specifiers(option['env'])
+ if isinstance(option.get('ini'), list):
+ process_option_specifiers(option['ini'])
+ if isinstance(option.get('vars'), list):
+ process_option_specifiers(option['vars'])
+ if isinstance(option.get('deprecated'), MutableMapping):
+ process_deprecation(option['deprecated'])
+ if isinstance(option.get('suboptions'), MutableMapping):
+ process_options(option['suboptions'])
+
+ def process_return_values(return_values):
+ for return_value in return_values.values():
+ if not isinstance(return_value, MutableMapping):
+ continue
+ if 'version_added' in return_value:
+ callback(return_value, 'version_added', 'version_added_collection')
+ if isinstance(return_value.get('contains'), MutableMapping):
+ process_return_values(return_value['contains'])
+
+ def process_attributes(attributes):
+ for attribute in attributes.values():
+ if not isinstance(attribute, MutableMapping):
+ continue
+ if 'version_added' in attribute:
+ callback(attribute, 'version_added', 'version_added_collection')
+
+ if not fragment:
+ return
+
+ if return_docs:
+ process_return_values(fragment)
+ return
+
+ if 'version_added' in fragment:
+ callback(fragment, 'version_added', 'version_added_collection')
+ if isinstance(fragment.get('deprecated'), MutableMapping):
+ process_deprecation(fragment['deprecated'], top_level=True)
+ if isinstance(fragment.get('options'), MutableMapping):
+ process_options(fragment['options'])
+ if isinstance(fragment.get('attributes'), MutableMapping):
+ process_attributes(fragment['attributes'])
+
+
+def add_collection_to_versions_and_dates(fragment, collection_name, is_module, return_docs=False):
+ def add(options, option, collection_name_field):
+ if collection_name_field not in options:
+ options[collection_name_field] = collection_name
+
+ _process_versions_and_dates(fragment, is_module, return_docs, add)
+
+
+def remove_current_collection_from_versions_and_dates(fragment, collection_name, is_module, return_docs=False):
+ def remove(options, option, collection_name_field):
+ if options.get(collection_name_field) == collection_name:
+ del options[collection_name_field]
+
+ _process_versions_and_dates(fragment, is_module, return_docs, remove)
+
+
+def add_fragments(doc, filename, fragment_loader, is_module=False):
+
+ fragments = doc.pop('extends_documentation_fragment', [])
+
+ if isinstance(fragments, string_types):
+ fragments = [fragments]
+
+ unknown_fragments = []
+
+ # doc_fragments are allowed to specify a fragment var other than DOCUMENTATION
+ # with a . separator; this is complicated by collections-hosted doc_fragments that
+    # use the same separator. Assume it's collection-hosted first and try to load
+    # as specified; on failure, assume the right-most component is a var, split it off,
+    # and retry the load.
+ for fragment_slug in fragments:
+ fragment_name = fragment_slug
+ fragment_var = 'DOCUMENTATION'
+
+ fragment_class = fragment_loader.get(fragment_name)
+ if fragment_class is None and '.' in fragment_slug:
+ splitname = fragment_slug.rsplit('.', 1)
+ fragment_name = splitname[0]
+ fragment_var = splitname[1].upper()
+ fragment_class = fragment_loader.get(fragment_name)
+
+ if fragment_class is None:
+ unknown_fragments.append(fragment_slug)
+ continue
+
+ fragment_yaml = getattr(fragment_class, fragment_var, None)
+ if fragment_yaml is None:
+ if fragment_var != 'DOCUMENTATION':
+ # if it's asking for something specific that's missing, that's an error
+ unknown_fragments.append(fragment_slug)
+ continue
+ else:
+ fragment_yaml = '{}' # TODO: this is still an error later since we require 'options' below...
+
+ fragment = AnsibleLoader(fragment_yaml, file_name=filename).get_single_data()
+
+ real_fragment_name = getattr(fragment_class, 'ansible_name')
+ real_collection_name = '.'.join(real_fragment_name.split('.')[0:2]) if '.' in real_fragment_name else ''
+ add_collection_to_versions_and_dates(fragment, real_collection_name, is_module=is_module)
+
+ if 'notes' in fragment:
+ notes = fragment.pop('notes')
+ if notes:
+ if 'notes' not in doc:
+ doc['notes'] = []
+ doc['notes'].extend(notes)
+
+ if 'seealso' in fragment:
+ seealso = fragment.pop('seealso')
+ if seealso:
+ if 'seealso' not in doc:
+ doc['seealso'] = []
+ doc['seealso'].extend(seealso)
+
+ if 'options' not in fragment and 'attributes' not in fragment:
+ raise Exception("missing options or attributes in fragment (%s), possibly misformatted?: %s" % (fragment_name, filename))
+
+ # ensure options themselves are directly merged
+ for doc_key in ['options', 'attributes']:
+ if doc_key in fragment:
+ if doc_key in doc:
+ try:
+ merge_fragment(doc[doc_key], fragment.pop(doc_key))
+ except Exception as e:
+ raise AnsibleError("%s %s (%s) of unknown type: %s" % (to_native(e), doc_key, fragment_name, filename))
+ else:
+ doc[doc_key] = fragment.pop(doc_key)
+
+ # merge rest of the sections
+ try:
+ merge_fragment(doc, fragment)
+ except Exception as e:
+ raise AnsibleError("%s (%s) of unknown type: %s" % (to_native(e), fragment_name, filename))
+
+ if unknown_fragments:
+ raise AnsibleError('unknown doc_fragment(s) in file {0}: {1}'.format(filename, to_native(', '.join(unknown_fragments))))
+
+
+def get_docstring(filename, fragment_loader, verbose=False, ignore_errors=False, collection_name=None, is_module=None, plugin_type=None):
+ """
+ DOCUMENTATION can be extended using documentation fragments loaded by the PluginLoader from the doc_fragments plugins.
+ """
+
+ if is_module is None:
+ if plugin_type is None:
+ is_module = False
+ else:
+ is_module = (plugin_type == 'module')
+ else:
+ # TODO deprecate is_module argument, now that we have 'type'
+ pass
+
+ data = read_docstring(filename, verbose=verbose, ignore_errors=ignore_errors)
+
+ if data.get('doc', False):
+ # add collection name to versions and dates
+ if collection_name is not None:
+ add_collection_to_versions_and_dates(data['doc'], collection_name, is_module=is_module)
+
+ # add fragments to documentation
+ add_fragments(data['doc'], filename, fragment_loader=fragment_loader, is_module=is_module)
+
+ if data.get('returndocs', False):
+ # add collection name to versions and dates
+ if collection_name is not None:
+ add_collection_to_versions_and_dates(data['returndocs'], collection_name, is_module=is_module, return_docs=True)
+
+ return data['doc'], data['plainexamples'], data['returndocs'], data['metadata']
+
+
+def get_versioned_doclink(path):
+ """
+ returns a versioned documentation link for the current Ansible major.minor version; used to generate
+ in-product warning/error links to the configured DOCSITE_ROOT_URL
+ (eg, https://docs.ansible.com/ansible/2.8/somepath/doc.html)
+
+ :param path: relative path to a document under docs/docsite/rst;
+ :return: absolute URL to the specified doc for the current version of Ansible
+ """
+ path = to_native(path)
+ try:
+ base_url = C.config.get_config_value('DOCSITE_ROOT_URL')
+ if not base_url.endswith('/'):
+ base_url += '/'
+ if path.startswith('/'):
+ path = path[1:]
+ split_ver = ansible_version.split('.')
+ if len(split_ver) < 3:
+ raise RuntimeError('invalid version ({0})'.format(ansible_version))
+
+ doc_version = '{0}.{1}'.format(split_ver[0], split_ver[1])
+
+ # check to see if it's a X.Y.0 non-rc prerelease or dev release, if so, assume devel (since the X.Y doctree
+ # isn't published until beta-ish)
+ if split_ver[2].startswith('0'):
+ # exclude rc; we should have the X.Y doctree live by rc1
+ if any((pre in split_ver[2]) for pre in ['a', 'b']) or len(split_ver) > 3 and 'dev' in split_ver[3]:
+ doc_version = 'devel'
+
+ return '{0}{1}/{2}'.format(base_url, doc_version, path)
+ except Exception as ex:
+ return '(unable to create versioned doc link for path {0}: {1})'.format(path, to_native(ex))
+
+
+def _find_adjacent(path, plugin, extensions):
+
+ adjacent = Path(path)
+
+ plugin_base_name = plugin.split('.')[-1]
+ if adjacent.stem != plugin_base_name:
+ # this should only affect filters/tests
+ adjacent = adjacent.with_name(plugin_base_name)
+
+ paths = []
+ for ext in extensions:
+ candidate = adjacent.with_suffix(ext)
+ if candidate == adjacent:
+ # we're looking for an adjacent file, skip this since it's identical
+ continue
+ if candidate.exists():
+ paths.append(to_native(candidate))
+
+ return paths
+
+
+def find_plugin_docfile(plugin, plugin_type, loader):
+ ''' if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding 'sidecar' file for docs '''
+
+ context = loader.find_plugin_with_context(plugin, ignore_deprecated=False, check_aliases=True)
+ if (not context or not context.resolved) and plugin_type in ('filter', 'test'):
+ # should only happen for filters/test
+ plugin_obj, context = loader.get_with_context(plugin)
+
+ if not context or not context.resolved:
+ raise AnsiblePluginNotFound('%s was not found' % (plugin), plugin_load_context=context)
+
+ docfile = Path(context.plugin_resolved_path)
+ if docfile.suffix not in C.DOC_EXTENSIONS:
+ # only look for adjacent if plugin file does not support documents
+ filenames = _find_adjacent(docfile, plugin, C.DOC_EXTENSIONS)
+ filename = filenames[0] if filenames else None
+ else:
+ filename = to_native(docfile)
+
+ if filename is None:
+ raise AnsibleError('%s cannot contain DOCUMENTATION nor does it have a companion documentation file' % (plugin))
+
+ return filename, context.plugin_resolved_collection
+
+
+def get_plugin_docs(plugin, plugin_type, loader, fragment_loader, verbose):
+
+ docs = []
+
+    # find the plugin doc file; this can raise an exception and short circuit when the
+    # plugin is 'not found', which we deliberately let propagate
+ filename, collection_name = find_plugin_docfile(plugin, plugin_type, loader)
+
+ try:
+ docs = get_docstring(filename, fragment_loader, verbose=verbose, collection_name=collection_name, plugin_type=plugin_type)
+ except Exception as e:
+ raise AnsibleParserError('%s did not contain a DOCUMENTATION attribute (%s)' % (plugin, filename), orig_exc=e)
+
+ # no good? try adjacent
+ if not docs[0]:
+ for newfile in _find_adjacent(filename, plugin, C.DOC_EXTENSIONS):
+ try:
+ docs = get_docstring(newfile, fragment_loader, verbose=verbose, collection_name=collection_name, plugin_type=plugin_type)
+ filename = newfile
+ if docs[0] is not None:
+ break
+ except Exception as e:
+ raise AnsibleParserError('Adjacent file %s did not contain a DOCUMENTATION attribute (%s)' % (plugin, filename), orig_exc=e)
+
+ # add extra data to docs[0] (aka 'DOCUMENTATION')
+ if docs[0] is None:
+ raise AnsibleParserError('No documentation available for %s (%s)' % (plugin, filename))
+ else:
+ docs[0]['filename'] = filename
+ docs[0]['collection'] = collection_name
+
+ return docs
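For instance, ``get_versioned_doclink`` maps a relative docsite path to the running X.Y doctree; output shown for an assumed 2.14.x controller with the default DOCSITE_ROOT_URL:

    from ansible.utils.plugin_docs import get_versioned_doclink

    get_versioned_doclink('playbook_guide/playbooks.html')
    # 'https://docs.ansible.com/ansible/2.14/playbook_guide/playbooks.html'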
diff --git a/lib/ansible/utils/py3compat.py b/lib/ansible/utils/py3compat.py
new file mode 100644
index 0000000..88d9fdf
--- /dev/null
+++ b/lib/ansible/utils/py3compat.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Toshio Kuratomi <a.badger@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Note that the original author of this, Toshio Kuratomi, is trying to submit this to six. If
+# successful, the code in six will be available under six's more liberal license:
+# https://mail.python.org/pipermail/python-porting/2018-July/000539.html
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from collections.abc import MutableMapping
+
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_bytes, to_text
+
+__all__ = ('environ',)
+
+
+class _TextEnviron(MutableMapping):
+ """
+ Utility class to return text strings from the environment instead of byte strings
+
+ Mimics the behaviour of os.environ on Python3
+ """
+ def __init__(self, env=None, encoding=None):
+ if env is None:
+ env = os.environ
+ self._raw_environ = env
+ self._value_cache = {}
+ if encoding is None:
+ # Since we're trying to mimic Python3's os.environ, use sys.getfilesystemencoding()
+ # instead of utf-8
+ self.encoding = sys.getfilesystemencoding()
+ else:
+ self.encoding = encoding
+
+ def __delitem__(self, key):
+ del self._raw_environ[key]
+
+ def __getitem__(self, key):
+ value = self._raw_environ[key]
+ if PY3:
+ return value
+ # Cache keys off of the undecoded values to handle any environment variables which change
+ # during a run
+ if value not in self._value_cache:
+ self._value_cache[value] = to_text(value, encoding=self.encoding,
+ nonstring='passthru', errors='surrogate_or_strict')
+ return self._value_cache[value]
+
+ def __setitem__(self, key, value):
+ self._raw_environ[key] = to_bytes(value, encoding=self.encoding, nonstring='strict',
+ errors='surrogate_or_strict')
+
+ def __iter__(self):
+ return self._raw_environ.__iter__()
+
+ def __len__(self):
+ return len(self._raw_environ)
+
+
+environ = _TextEnviron(encoding='utf-8')
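Reads behave like ``os.environ`` but always yield text; a sketch:

    from ansible.utils.py3compat import environ

    # returns a text string on both Python 2 and Python 3
    home = environ.get('HOME', u'')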
diff --git a/lib/ansible/utils/sentinel.py b/lib/ansible/utils/sentinel.py
new file mode 100644
index 0000000..ca4f827
--- /dev/null
+++ b/lib/ansible/utils/sentinel.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class Sentinel:
+ """
+    Object which can be used to mark an entry as being special
+
+ A sentinel value demarcates a value or marks an entry as having a special meaning. In C, the
+ Null byte is used as a sentinel for the end of a string. In Python, None is often used as
+ a Sentinel in optional parameters to mean that the parameter was not set by the user.
+
+    You should use None as a Sentinel value in any Python code where None is not a valid entry. If
+    None is a valid entry, though, then you need to create a different value, which is the purpose
+    of this class.
+
+ Example of using Sentinel as a default parameter value::
+
+ def confirm_big_red_button(tristate=Sentinel):
+ if tristate is Sentinel:
+ print('You must explicitly press the big red button to blow up the base')
+ elif tristate is True:
+ print('Countdown to destruction activated')
+ elif tristate is False:
+ print('Countdown stopped')
+ elif tristate is None:
+ print('Waiting for more input')
+
+ Example of using Sentinel to tell whether a dict which has a default value has been changed::
+
+ values = {'one': Sentinel, 'two': Sentinel}
+ defaults = {'one': 1, 'two': 2}
+
+ # [.. Other code which does things including setting a new value for 'one' ..]
+ values['one'] = None
+ # [..]
+
+ print('You made changes to:')
+ for key, value in values.items():
+ if value is Sentinel:
+ continue
+            print('%s: %s' % (key, value))
+ """
+
+ def __new__(cls):
+ """
+ Return the cls itself. This makes both equality and identity True for comparing the class
+ to an instance of the class, preventing common usage errors.
+
+ Preferred usage::
+
+ a = Sentinel
+ if a is Sentinel:
+ print('Sentinel value')
+
+ However, these are True as well, eliminating common usage errors::
+
+ if Sentinel is Sentinel():
+ print('Sentinel value')
+
+ if Sentinel == Sentinel():
+ print('Sentinel value')
+ """
+ return cls
diff --git a/lib/ansible/utils/shlex.py b/lib/ansible/utils/shlex.py
new file mode 100644
index 0000000..5e82021
--- /dev/null
+++ b/lib/ansible/utils/shlex.py
@@ -0,0 +1,34 @@
+# (c) 2015, Marius Gedminas <marius@gedmin.as>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import shlex
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_bytes, to_text
+
+
+if PY3:
+ # shlex.split() wants Unicode (i.e. ``str``) input on Python 3
+ shlex_split = shlex.split
+else:
+ # shlex.split() wants bytes (i.e. ``str``) input on Python 2
+ def shlex_split(s, comments=False, posix=True):
+ return map(to_text, shlex.split(to_bytes(s), comments, posix))
+ shlex_split.__doc__ = shlex.split.__doc__
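On either Python version the wrapper yields text strings; a sketch:

    from ansible.utils.shlex import shlex_split

    list(shlex_split('ssh -o "User ansible" host'))
    # ['ssh', '-o', 'User ansible', 'host']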
diff --git a/lib/ansible/utils/singleton.py b/lib/ansible/utils/singleton.py
new file mode 100644
index 0000000..4299403
--- /dev/null
+++ b/lib/ansible/utils/singleton.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from threading import RLock
+
+
+class Singleton(type):
+ """Metaclass for classes that wish to implement Singleton
+ functionality. If an instance of the class exists, it's returned,
+ otherwise a single instance is instantiated and returned.
+ """
+ def __init__(cls, name, bases, dct):
+ super(Singleton, cls).__init__(name, bases, dct)
+ cls.__instance = None
+ cls.__rlock = RLock()
+
+ def __call__(cls, *args, **kw):
+ if cls.__instance is not None:
+ return cls.__instance
+
+ with cls.__rlock:
+ if cls.__instance is None:
+ cls.__instance = super(Singleton, cls).__call__(*args, **kw)
+
+ return cls.__instance
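A sketch of the metaclass in use (Python 3 syntax shown for brevity):

    from ansible.utils.singleton import Singleton


    class Config(metaclass=Singleton):
        def __init__(self):
            self.settings = {}


    assert Config() is Config()  # every call returns the same instance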
diff --git a/lib/ansible/utils/ssh_functions.py b/lib/ansible/utils/ssh_functions.py
new file mode 100644
index 0000000..a728889
--- /dev/null
+++ b/lib/ansible/utils/ssh_functions.py
@@ -0,0 +1,66 @@
+# (c) 2016, James Tanner
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import subprocess
+
+from ansible import constants as C
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.compat.paramiko import paramiko
+
+
+_HAS_CONTROLPERSIST = {} # type: dict[str, bool]
+
+
+def check_for_controlpersist(ssh_executable):
+ try:
+ # If we've already checked this executable
+ return _HAS_CONTROLPERSIST[ssh_executable]
+ except KeyError:
+ pass
+
+ b_ssh_exec = to_bytes(ssh_executable, errors='surrogate_or_strict')
+ has_cp = True
+ try:
+ cmd = subprocess.Popen([b_ssh_exec, '-o', 'ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (out, err) = cmd.communicate()
+ if b"Bad configuration option" in err or b"Usage:" in err:
+ has_cp = False
+ except OSError:
+ has_cp = False
+
+ _HAS_CONTROLPERSIST[ssh_executable] = has_cp
+ return has_cp
+
+
+# TODO: move to 'smart' connection plugin that subclasses to ssh/paramiko as needed.
+def set_default_transport():
+
+ # deal with 'smart' connection .. one time ..
+ if C.DEFAULT_TRANSPORT == 'smart':
+ # TODO: check if we can deprecate this as ssh w/o control persist should
+ # not be as common anymore.
+
+ # see if SSH can support ControlPersist if not use paramiko
+ if not check_for_controlpersist('ssh') and paramiko is not None:
+ C.DEFAULT_TRANSPORT = "paramiko"
+ else:
+ C.DEFAULT_TRANSPORT = "ssh"
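The cached probe can also be used directly; a sketch:

    from ansible.utils.ssh_functions import check_for_controlpersist

    if check_for_controlpersist('ssh'):
        # safe to rely on ControlPersist-based connection sharing
        pass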
diff --git a/lib/ansible/utils/unicode.py b/lib/ansible/utils/unicode.py
new file mode 100644
index 0000000..1218a6e
--- /dev/null
+++ b/lib/ansible/utils/unicode.py
@@ -0,0 +1,33 @@
+# (c) 2012-2014, Toshio Kuratomi <a.badger@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_text
+
+
+__all__ = ('unicode_wrap',)
+
+
+def unicode_wrap(func, *args, **kwargs):
+ """If a function returns a string, force it to be a text string.
+
+ Use with partial to ensure that filter plugins will return text values.
+ """
+ return to_text(func(*args, **kwargs), nonstring='passthru')
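As the docstring suggests, ``functools.partial`` turns any bytes-returning function into a text-returning one; a sketch:

    import os
    from functools import partial

    from ansible.utils.unicode import unicode_wrap

    basename_text = partial(unicode_wrap, os.path.basename)
    basename_text(b'/tmp/demo')  # u'demo' (text even when the input is bytes)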
diff --git a/lib/ansible/utils/unsafe_proxy.py b/lib/ansible/utils/unsafe_proxy.py
new file mode 100644
index 0000000..d78ebf6
--- /dev/null
+++ b/lib/ansible/utils/unsafe_proxy.py
@@ -0,0 +1,128 @@
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
+# retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+#
+# Original Python Recipe for Proxy:
+# http://code.activestate.com/recipes/496741-object-proxying/
+# Author: Tomer Filiba
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections.abc import Mapping, Set
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.six import string_types, binary_type, text_type
+from ansible.utils.native_jinja import NativeJinjaText
+
+
+__all__ = ['AnsibleUnsafe', 'wrap_var']
+
+
+class AnsibleUnsafe(object):
+ __UNSAFE__ = True
+
+
+class AnsibleUnsafeBytes(binary_type, AnsibleUnsafe):
+ def decode(self, *args, **kwargs):
+ """Wrapper method to ensure type conversions maintain unsafe context"""
+ return AnsibleUnsafeText(super(AnsibleUnsafeBytes, self).decode(*args, **kwargs))
+
+
+class AnsibleUnsafeText(text_type, AnsibleUnsafe):
+ def encode(self, *args, **kwargs):
+ """Wrapper method to ensure type conversions maintain unsafe context"""
+ return AnsibleUnsafeBytes(super(AnsibleUnsafeText, self).encode(*args, **kwargs))
+
+
+class NativeJinjaUnsafeText(NativeJinjaText, AnsibleUnsafeText):
+ pass
+
+
+def _wrap_dict(v):
+ return dict((wrap_var(k), wrap_var(item)) for k, item in v.items())
+
+
+def _wrap_sequence(v):
+ """Wraps a sequence with unsafe, not meant for strings, primarily
+ ``tuple`` and ``list``
+ """
+ v_type = type(v)
+ return v_type(wrap_var(item) for item in v)
+
+
+def _wrap_set(v):
+ return set(wrap_var(item) for item in v)
+
+
+def wrap_var(v):
+ if v is None or isinstance(v, AnsibleUnsafe):
+ return v
+
+ if isinstance(v, Mapping):
+ v = _wrap_dict(v)
+ elif isinstance(v, Set):
+ v = _wrap_set(v)
+ elif is_sequence(v):
+ v = _wrap_sequence(v)
+ elif isinstance(v, NativeJinjaText):
+ v = NativeJinjaUnsafeText(v)
+ elif isinstance(v, binary_type):
+ v = AnsibleUnsafeBytes(v)
+ elif isinstance(v, text_type):
+ v = AnsibleUnsafeText(v)
+
+ return v
+
+
+def to_unsafe_bytes(*args, **kwargs):
+ return wrap_var(to_bytes(*args, **kwargs))
+
+
+def to_unsafe_text(*args, **kwargs):
+ return wrap_var(to_text(*args, **kwargs))
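A sketch of ``wrap_var`` recursively tainting a structure so the templar will not re-template values that came from task results:

    from ansible.utils.unsafe_proxy import AnsibleUnsafe, wrap_var

    data = wrap_var({'cmd': '{{ lookup("pipe", "id") }}', 'args': ['{{ a }}']})
    assert isinstance(data['cmd'], AnsibleUnsafe)
    assert isinstance(data['args'][0], AnsibleUnsafe)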
diff --git a/lib/ansible/utils/vars.py b/lib/ansible/utils/vars.py
new file mode 100644
index 0000000..a3224c8
--- /dev/null
+++ b/lib/ansible/utils/vars.py
@@ -0,0 +1,293 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import keyword
+import random
+import uuid
+
+from collections.abc import MutableMapping, MutableSequence
+from json import dumps
+
+from ansible import constants as C
+from ansible import context
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils.six import string_types, PY3
+from ansible.module_utils._text import to_native, to_text
+from ansible.parsing.splitter import parse_kv
+
+
+ADDITIONAL_PY2_KEYWORDS = frozenset(("True", "False", "None"))
+
+_MAXSIZE = 2 ** 32
+cur_id = 0
+node_mac = ("%012x" % uuid.getnode())[:12]
+random_int = ("%08x" % random.randint(0, _MAXSIZE))[:8]
+
+
+def get_unique_id():
+ global cur_id
+ cur_id += 1
+ return "-".join([
+ node_mac[0:8],
+ node_mac[8:12],
+ random_int[0:4],
+ random_int[4:8],
+ ("%012x" % cur_id)[:12],
+ ])
+
+
+def _validate_mutable_mappings(a, b):
+ """
+ Internal convenience function to ensure arguments are MutableMappings
+
+ This checks that all arguments are MutableMappings or raises an error
+
+ :raises AnsibleError: if one of the arguments is not a MutableMapping
+ """
+
+ # If this becomes generally needed, change the signature to operate on
+ # a variable number of arguments instead.
+
+ if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)):
+ myvars = []
+ for x in [a, b]:
+ try:
+ myvars.append(dumps(x))
+ except Exception:
+ myvars.append(to_native(x))
+ raise AnsibleError("failed to combine variables, expected dicts but got a '{0}' and a '{1}': \n{2}\n{3}".format(
+ a.__class__.__name__, b.__class__.__name__, myvars[0], myvars[1])
+ )
+
+
+def combine_vars(a, b, merge=None):
+ """
+ Return a copy of dictionaries of variables based on configured hash behavior
+ """
+
+ if merge or merge is None and C.DEFAULT_HASH_BEHAVIOUR == "merge":
+ return merge_hash(a, b)
+ else:
+ # HASH_BEHAVIOUR == 'replace'
+ _validate_mutable_mappings(a, b)
+ result = a | b
+ return result
+
+
+def merge_hash(x, y, recursive=True, list_merge='replace'):
+ """
+ Return a new dictionary result of the merges of y into x,
+ so that keys from y take precedence over keys from x.
+ (x and y aren't modified)
+ """
+ if list_merge not in ('replace', 'keep', 'append', 'prepend', 'append_rp', 'prepend_rp'):
+ raise AnsibleError("merge_hash: 'list_merge' argument can only be equal to 'replace', 'keep', 'append', 'prepend', 'append_rp' or 'prepend_rp'")
+
+ # verify x & y are dicts
+ _validate_mutable_mappings(x, y)
+
+ # to speed things up: if x is empty or equal to y, return y
+ # (this `if` can be remove without impact on the function
+ # except performance)
+ if x == {} or x == y:
+ return y.copy()
+
+ # in the following we will copy elements from y to x, but
+ # we don't want to modify x, so we create a copy of it
+ x = x.copy()
+
+ # to speed things up: use dict.update if possible
+    # (this `if` can be removed without impact on the function
+    # except performance)
+ if not recursive and list_merge == 'replace':
+ x.update(y)
+ return x
+
+ # insert each element of y in x, overriding the one in x
+ # (as y has higher priority)
+ # we copy elements from y to x instead of x to y because
+    # there is a high probability x will be the "default" dict the user
+    # wants to "patch" with y
+    # therefore x will have many more elements than y
+ for key, y_value in y.items():
+ # if `key` isn't in x
+ # update x and move on to the next element of y
+ if key not in x:
+ x[key] = y_value
+ continue
+ # from this point we know `key` is in x
+
+ x_value = x[key]
+
+ # if both x's element and y's element are dicts
+ # recursively "combine" them or override x's with y's element
+ # depending on the `recursive` argument
+ # and move on to the next element of y
+ if isinstance(x_value, MutableMapping) and isinstance(y_value, MutableMapping):
+ if recursive:
+ x[key] = merge_hash(x_value, y_value, recursive, list_merge)
+ else:
+ x[key] = y_value
+ continue
+
+ # if both x's element and y's element are lists
+ # "merge" them depending on the `list_merge` argument
+ # and move on to the next element of y
+ if isinstance(x_value, MutableSequence) and isinstance(y_value, MutableSequence):
+ if list_merge == 'replace':
+ # replace x value by y's one as it has higher priority
+ x[key] = y_value
+ elif list_merge == 'append':
+ x[key] = x_value + y_value
+ elif list_merge == 'prepend':
+ x[key] = y_value + x_value
+ elif list_merge == 'append_rp':
+ # append all elements from y_value (high prio) to x_value (low prio)
+ # and remove x_value elements that are also in y_value
+                # we don't remove elements from x_value nor y_value that were already duplicated
+                # (we assume that there is a reason if such duplicate elements were there)
+ # _rp stands for "remove present"
+ x[key] = [z for z in x_value if z not in y_value] + y_value
+ elif list_merge == 'prepend_rp':
+ # same as 'append_rp' but y_value elements are prepend
+ x[key] = y_value + [z for z in x_value if z not in y_value]
+ # else 'keep'
+            # keep x value even if y is of higher priority
+ # it's done by not changing x[key]
+ continue
+
+ # else just override x's element with y's one
+ x[key] = y_value
+
+ return x
+
+
+def load_extra_vars(loader):
+ extra_vars = {}
+ for extra_vars_opt in context.CLIARGS.get('extra_vars', tuple()):
+ data = None
+ extra_vars_opt = to_text(extra_vars_opt, errors='surrogate_or_strict')
+ if extra_vars_opt is None or not extra_vars_opt:
+ continue
+
+ if extra_vars_opt.startswith(u"@"):
+ # Argument is a YAML file (JSON is a subset of YAML)
+ data = loader.load_from_file(extra_vars_opt[1:])
+ elif extra_vars_opt[0] in [u'/', u'.']:
+ raise AnsibleOptionsError("Please prepend extra_vars filename '%s' with '@'" % extra_vars_opt)
+ elif extra_vars_opt[0] in [u'[', u'{']:
+ # Arguments as YAML
+ data = loader.load(extra_vars_opt)
+ else:
+ # Arguments as Key-value
+ data = parse_kv(extra_vars_opt)
+
+ if isinstance(data, MutableMapping):
+ extra_vars = combine_vars(extra_vars, data)
+ else:
+ raise AnsibleOptionsError("Invalid extra vars data supplied. '%s' could not be made into a dictionary" % extra_vars_opt)
+
+ return extra_vars
+
+
+def load_options_vars(version):
+
+ if version is None:
+ version = 'Unknown'
+ options_vars = {'ansible_version': version}
+ attrs = {'check': 'check_mode',
+ 'diff': 'diff_mode',
+ 'forks': 'forks',
+ 'inventory': 'inventory_sources',
+ 'skip_tags': 'skip_tags',
+ 'subset': 'limit',
+ 'tags': 'run_tags',
+ 'verbosity': 'verbosity'}
+
+ for attr, alias in attrs.items():
+ opt = context.CLIARGS.get(attr)
+ if opt is not None:
+ options_vars['ansible_%s' % alias] = opt
+
+ return options_vars
+
+
+def _isidentifier_PY3(ident):
+ if not isinstance(ident, string_types):
+ return False
+
+ # NOTE Python 3.7 offers str.isascii() so switch over to using it once
+ # we stop supporting 3.5 and 3.6 on the controller
+ try:
+ # Python 2 does not allow non-ascii characters in identifiers so unify
+ # the behavior for Python 3
+ ident.encode('ascii')
+ except UnicodeEncodeError:
+ return False
+
+ if not ident.isidentifier():
+ return False
+
+ if keyword.iskeyword(ident):
+ return False
+
+ return True
+
+
+def _isidentifier_PY2(ident):
+ if not isinstance(ident, string_types):
+ return False
+
+ if not ident:
+ return False
+
+ if C.INVALID_VARIABLE_NAMES.search(ident):
+ return False
+
+ if keyword.iskeyword(ident) or ident in ADDITIONAL_PY2_KEYWORDS:
+ return False
+
+ return True
+
+
+if PY3:
+ isidentifier = _isidentifier_PY3
+else:
+ isidentifier = _isidentifier_PY2
+
+
+isidentifier.__doc__ = """Determine if string is valid identifier.
+
+This function is used to validate that any variables created in a play are
+valid Python identifiers that do not conflict with Python keywords, in order
+to prevent unexpected behavior. Since Python 2 and Python 3 differ in what
+a valid identifier is, this function unifies the validation so playbooks are
+portable between the two. The following changes were made:
+
+ * disallow non-ascii characters (Python 3 allows for them as opposed to Python 2)
+ * True, False and None are reserved keywords (these are reserved keywords
+ on Python 3 as opposed to Python 2)
+
+:arg ident: A text string of the identifier to check. Note: It is the caller's
+    responsibility to convert ident to text if it is not already.
+
+Originally posted at http://stackoverflow.com/a/29586366
+"""
diff --git a/lib/ansible/utils/version.py b/lib/ansible/utils/version.py
new file mode 100644
index 0000000..c045e7d
--- /dev/null
+++ b/lib/ansible/utils/version.py
@@ -0,0 +1,272 @@
+# Copyright (c) 2020 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.compat.version import LooseVersion, Version
+
+from ansible.module_utils.six import text_type
+
+
+# Regular expression taken from
+# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
+SEMVER_RE = re.compile(
+ r'''
+ ^
+ (?P<major>0|[1-9]\d*)
+ \.
+ (?P<minor>0|[1-9]\d*)
+ \.
+ (?P<patch>0|[1-9]\d*)
+ (?:
+ -
+ (?P<prerelease>
+ (?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)
+ (?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*
+ )
+ )?
+ (?:
+ \+
+ (?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*)
+ )?
+ $
+ ''',
+ flags=re.X
+)
+
+
+class _Alpha:
+ """Class to easily allow comparing strings
+
+ Largely this exists to make comparing an integer and a string on py3
+ so that it works like py2.
+ """
+ def __init__(self, specifier):
+ self.specifier = specifier
+
+ def __repr__(self):
+ return repr(self.specifier)
+
+ def __eq__(self, other):
+ if isinstance(other, _Alpha):
+ return self.specifier == other.specifier
+ elif isinstance(other, str):
+ return self.specifier == other
+
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ if isinstance(other, _Alpha):
+ return self.specifier < other.specifier
+ elif isinstance(other, str):
+ return self.specifier < other
+ elif isinstance(other, _Numeric):
+ return False
+
+ raise ValueError
+
+ def __le__(self, other):
+ return self.__lt__(other) or self.__eq__(other)
+
+ def __gt__(self, other):
+ return not self.__le__(other)
+
+ def __ge__(self, other):
+ return not self.__lt__(other)
+
+
+class _Numeric:
+ """Class to easily allow comparing numbers
+
+    Largely this exists to make comparing an integer and a string on py3
+    work like it does on py2.
+ """
+ def __init__(self, specifier):
+ self.specifier = int(specifier)
+
+ def __repr__(self):
+ return repr(self.specifier)
+
+ def __eq__(self, other):
+ if isinstance(other, _Numeric):
+ return self.specifier == other.specifier
+ elif isinstance(other, int):
+ return self.specifier == other
+
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ if isinstance(other, _Numeric):
+ return self.specifier < other.specifier
+ elif isinstance(other, int):
+ return self.specifier < other
+ elif isinstance(other, _Alpha):
+ return True
+
+ raise ValueError
+
+ def __le__(self, other):
+ return self.__lt__(other) or self.__eq__(other)
+
+ def __gt__(self, other):
+ return not self.__le__(other)
+
+ def __ge__(self, other):
+ return not self.__lt__(other)
+
+
+class SemanticVersion(Version):
+ """Version comparison class that implements Semantic Versioning 2.0.0
+
+ Based off of ``distutils.version.Version``
+ """
+
+ version_re = SEMVER_RE
+
+ def __init__(self, vstring=None):
+ self.vstring = vstring
+ self.major = None
+ self.minor = None
+ self.patch = None
+ self.prerelease = ()
+ self.buildmetadata = ()
+
+ if vstring:
+ self.parse(vstring)
+
+ def __repr__(self):
+ return 'SemanticVersion(%r)' % self.vstring
+
+ @staticmethod
+ def from_loose_version(loose_version):
+ """This method is designed to take a ``LooseVersion``
+ and attempt to construct a ``SemanticVersion`` from it
+
+ This is useful where you want to do simple version math
+ without requiring users to provide a compliant semver.
+ """
+ if not isinstance(loose_version, LooseVersion):
+ raise ValueError("%r is not a LooseVersion" % loose_version)
+
+ try:
+ version = loose_version.version[:]
+ except AttributeError:
+ raise ValueError("%r is not a LooseVersion" % loose_version)
+
+ extra_idx = 3
+ for marker in ('-', '+'):
+ try:
+ idx = version.index(marker)
+ except ValueError:
+ continue
+ else:
+ if idx < extra_idx:
+ extra_idx = idx
+ version = version[:extra_idx]
+
+ if version and set(type(v) for v in version) != set((int,)):
+ raise ValueError("Non integer values in %r" % loose_version)
+
+ # Extra is everything to the right of the core version
+ extra = re.search('[+-].+$', loose_version.vstring)
+
+ version = version + [0] * (3 - len(version))
+ return SemanticVersion(
+ '%s%s' % (
+ '.'.join(str(v) for v in version),
+ extra.group(0) if extra else ''
+ )
+ )
+
+ def parse(self, vstring):
+ match = SEMVER_RE.match(vstring)
+ if not match:
+ raise ValueError("invalid semantic version '%s'" % vstring)
+
+ (major, minor, patch, prerelease, buildmetadata) = match.group(1, 2, 3, 4, 5)
+ self.major = int(major)
+ self.minor = int(minor)
+ self.patch = int(patch)
+
+ if prerelease:
+ self.prerelease = tuple(_Numeric(x) if x.isdigit() else _Alpha(x) for x in prerelease.split('.'))
+ if buildmetadata:
+ self.buildmetadata = tuple(_Numeric(x) if x.isdigit() else _Alpha(x) for x in buildmetadata.split('.'))
+
+ @property
+ def core(self):
+ return self.major, self.minor, self.patch
+
+ @property
+ def is_prerelease(self):
+ return bool(self.prerelease)
+
+ @property
+ def is_stable(self):
+ # Major version zero (0.y.z) is for initial development. Anything MAY change at any time.
+ # The public API SHOULD NOT be considered stable.
+ # https://semver.org/#spec-item-4
+ return not (self.major == 0 or self.is_prerelease)
+
+ def _cmp(self, other):
+ if isinstance(other, str):
+ other = SemanticVersion(other)
+
+ if self.core != other.core:
+ # if the core version doesn't match
+ # prerelease and buildmetadata doesn't matter
+ if self.core < other.core:
+ return -1
+ else:
+ return 1
+
+ if not any((self.prerelease, other.prerelease)):
+ return 0
+
+ if self.prerelease and not other.prerelease:
+ return -1
+ elif not self.prerelease and other.prerelease:
+ return 1
+ else:
+ if self.prerelease < other.prerelease:
+ return -1
+ elif self.prerelease > other.prerelease:
+ return 1
+
+ # Build metadata MUST be ignored when determining version precedence
+ # https://semver.org/#spec-item-10
+ # With the above in mind it is ignored here
+
+ # If we have made it here, things should be equal
+ return 0
+
+ # The Py2 and Py3 implementations of distutils.version.Version
+ # are quite different, this makes the Py2 and Py3 implementations
+ # the same
+ def __eq__(self, other):
+ return self._cmp(other) == 0
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ return self._cmp(other) < 0
+
+ def __le__(self, other):
+ return self._cmp(other) <= 0
+
+ def __gt__(self, other):
+ return self._cmp(other) > 0
+
+ def __ge__(self, other):
+ return self._cmp(other) >= 0
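Precedence behaviour of the class above, following the semver spec:

    from ansible.module_utils.compat.version import LooseVersion
    from ansible.utils.version import SemanticVersion

    SemanticVersion('2.10.0-beta.1') < SemanticVersion('2.10.0')  # True: prereleases sort lower
    SemanticVersion('1.0.0+build.5') == SemanticVersion('1.0.0')  # True: build metadata is ignored
    SemanticVersion('0.9.0').is_stable                            # False: major version zero
    SemanticVersion.from_loose_version(LooseVersion('2.10'))      # SemanticVersion('2.10.0')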
diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/ansible/vars/__init__.py
diff --git a/lib/ansible/vars/clean.py b/lib/ansible/vars/clean.py
new file mode 100644
index 0000000..1de6fcf
--- /dev/null
+++ b/lib/ansible/vars/clean.py
@@ -0,0 +1,171 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from collections.abc import MutableMapping, MutableSequence
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils import six
+from ansible.module_utils._text import to_text
+from ansible.plugins.loader import connection_loader
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def module_response_deepcopy(v):
+ """Function to create a deep copy of module response data
+
+    Designed to be used within the Ansible "engine" to address performance
+    issues where ``copy.deepcopy`` was previously used, largely CPU
+    and memory contention.
+
+ This only supports the following data types, and was designed to only
+ handle specific workloads:
+
+ * ``dict``
+ * ``list``
+
+ The data we pass here will come from a serialization such
+ as JSON, so we shouldn't have need for other data types such as
+ ``set`` or ``tuple``.
+
+ Take note that this function should not be used extensively as a
+ replacement for ``deepcopy`` due to the naive way in which this
+ handles other data types.
+
+ Do not expect uses outside of those listed below to maintain
+ backwards compatibility, in case we need to extend this function
+ to handle our specific needs:
+
+ * ``ansible.executor.task_result.TaskResult.clean_copy``
+ * ``ansible.vars.clean.clean_facts``
+ * ``ansible.vars.namespace_facts``
+ """
+ if isinstance(v, dict):
+ ret = v.copy()
+ items = six.iteritems(ret)
+ elif isinstance(v, list):
+ ret = v[:]
+ items = enumerate(ret)
+ else:
+ return v
+
+ for key, value in items:
+ if isinstance(value, (dict, list)):
+ ret[key] = module_response_deepcopy(value)
+ else:
+ ret[key] = value
+
+ return ret
+
+
+def strip_internal_keys(dirty, exceptions=None):
+ # All keys starting with _ansible_ are internal, so change the 'dirty' mapping and remove them.
+
+ if exceptions is None:
+ exceptions = tuple()
+
+ if isinstance(dirty, MutableSequence):
+
+ for element in dirty:
+ if isinstance(element, (MutableMapping, MutableSequence)):
+ strip_internal_keys(element, exceptions=exceptions)
+
+ elif isinstance(dirty, MutableMapping):
+
+ # listify to avoid updating dict while iterating over it
+ for k in list(dirty.keys()):
+ if isinstance(k, six.string_types):
+ if k.startswith('_ansible_') and k not in exceptions:
+ del dirty[k]
+ continue
+
+ if isinstance(dirty[k], (MutableMapping, MutableSequence)):
+ strip_internal_keys(dirty[k], exceptions=exceptions)
+ else:
+ raise AnsibleError("Cannot strip invalid keys from %s" % type(dirty))
+
+ return dirty
+
+
+def remove_internal_keys(data):
+ '''
+ More nuanced version of strip_internal_keys
+ '''
+ for key in list(data.keys()):
+ if (key.startswith('_ansible_') and key != '_ansible_parsed') or key in C.INTERNAL_RESULT_KEYS:
+ display.warning("Removed unexpected internal key in module return: %s = %s" % (key, data[key]))
+ del data[key]
+
+ # remove bad/empty internal keys
+ for key in ['warnings', 'deprecations']:
+ if key in data and not data[key]:
+ del data[key]
+
+ # cleanse fact values that are allowed from actions but not modules
+ for key in list(data.get('ansible_facts', {}).keys()):
+ if key.startswith('discovered_interpreter_') or key.startswith('ansible_discovered_interpreter_'):
+ del data['ansible_facts'][key]
+
+
+def clean_facts(facts):
+    ''' remove facts that can override internal keys or are otherwise deemed unsafe '''
+ data = module_response_deepcopy(facts)
+
+ remove_keys = set()
+ fact_keys = set(data.keys())
+ # first we add all of our magic variable names to the set of
+ # keys we want to remove from facts
+ # NOTE: these will eventually disappear in favor of others below
+ for magic_var in C.MAGIC_VARIABLE_MAPPING:
+ remove_keys.update(fact_keys.intersection(C.MAGIC_VARIABLE_MAPPING[magic_var]))
+
+ # remove common connection vars
+ remove_keys.update(fact_keys.intersection(C.COMMON_CONNECTION_VARS))
+
+ # next we remove any connection plugin specific vars
+ for conn_path in connection_loader.all(path_only=True):
+ conn_name = os.path.splitext(os.path.basename(conn_path))[0]
+ re_key = re.compile('^ansible_%s_' % re.escape(conn_name))
+ for fact_key in fact_keys:
+            # most lightweight VM or container tech creates devices with this pattern; this avoids filtering them out
+ if (re_key.match(fact_key) and not fact_key.endswith(('_bridge', '_gwbridge'))) or fact_key.startswith('ansible_become_'):
+ remove_keys.add(fact_key)
+
+ # remove some KNOWN keys
+ for hard in C.RESTRICTED_RESULT_KEYS + C.INTERNAL_RESULT_KEYS:
+ if hard in fact_keys:
+ remove_keys.add(hard)
+
+ # finally, we search for interpreter keys to remove
+ re_interp = re.compile('^ansible_.*_interpreter$')
+ for fact_key in fact_keys:
+ if re_interp.match(fact_key):
+ remove_keys.add(fact_key)
+ # then we remove them (except for ssh host keys)
+ for r_key in remove_keys:
+ if not r_key.startswith('ansible_ssh_host_key_'):
+ display.warning("Removed restricted key from module data: %s" % (r_key))
+ del data[r_key]
+
+ return strip_internal_keys(data)
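+
+# Illustrative example:
+# clean_facts({'ansible_python_interpreter': '/usr/bin/python3', 'ansible_os_family': 'Debian'})
+# drops the interpreter key (it matches ansible_*_interpreter) and keeps the plain OS fact.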
+
+
+def namespace_facts(facts):
+ ''' return all facts wrapped in 'ansible_facts', with their ansible_ prefix stripped '''
+ deprefixed = {}
+ for k in facts:
+ if k.startswith('ansible_') and k not in ('ansible_local',):
+ deprefixed[k[8:]] = module_response_deepcopy(facts[k])
+ else:
+ deprefixed[k] = module_response_deepcopy(facts[k])
+
+ return {'ansible_facts': deprefixed}
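+
+# Illustrative example:
+# namespace_facts({'ansible_os_family': 'Debian', 'foo': 1, 'ansible_local': {}})
+# -> {'ansible_facts': {'os_family': 'Debian', 'foo': 1, 'ansible_local': {}}}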
diff --git a/lib/ansible/vars/fact_cache.py b/lib/ansible/vars/fact_cache.py
new file mode 100644
index 0000000..868a905
--- /dev/null
+++ b/lib/ansible/vars/fact_cache.py
@@ -0,0 +1,72 @@
+# Copyright: (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections.abc import MutableMapping
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.plugins.loader import cache_loader
+from ansible.utils.display import Display
+
+
+display = Display()
+
+
+class FactCache(MutableMapping):
+
+ def __init__(self, *args, **kwargs):
+
+ self._plugin = cache_loader.get(C.CACHE_PLUGIN)
+ if not self._plugin:
+ raise AnsibleError('Unable to load the facts cache plugin (%s).' % (C.CACHE_PLUGIN))
+
+ super(FactCache, self).__init__(*args, **kwargs)
+
+ def __getitem__(self, key):
+ if not self._plugin.contains(key):
+ raise KeyError
+ return self._plugin.get(key)
+
+ def __setitem__(self, key, value):
+ self._plugin.set(key, value)
+
+ def __delitem__(self, key):
+ self._plugin.delete(key)
+
+ def __contains__(self, key):
+ return self._plugin.contains(key)
+
+ def __iter__(self):
+ return iter(self._plugin.keys())
+
+ def __len__(self):
+ return len(self._plugin.keys())
+
+ def copy(self):
+ """ Return a primitive copy of the keys and values from the cache. """
+ return dict(self)
+
+ def keys(self):
+ return self._plugin.keys()
+
+ def flush(self):
+ """ Flush the fact cache of all keys. """
+ self._plugin.flush()
+
+ def first_order_merge(self, key, value):
+ host_facts = {key: value}
+
+ try:
+ host_cache = self._plugin.get(key)
+ if host_cache:
+ host_cache.update(value)
+ host_facts[key] = host_cache
+ except KeyError:
+ pass
+
+ super(FactCache, self).update(host_facts)
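+
+# Usage sketch (illustrative; assumes a working cache plugin, e.g. the default 'memory' backend):
+# cache = FactCache()
+# cache['host1'] = {'ansible_os_family': 'Debian'}
+# cache.first_order_merge('host1', {'uptime': 42}) # merges at the first level only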
diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py
new file mode 100644
index 0000000..e6679ef
--- /dev/null
+++ b/lib/ansible/vars/hostvars.py
@@ -0,0 +1,155 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections.abc import Mapping
+
+from ansible.template import Templar, AnsibleUndefined
+
+STATIC_VARS = [
+ 'ansible_version',
+ 'ansible_play_hosts',
+ 'ansible_dependent_role_names',
+ 'ansible_play_role_names',
+ 'ansible_role_names',
+ 'inventory_hostname',
+ 'inventory_hostname_short',
+ 'inventory_file',
+ 'inventory_dir',
+ 'groups',
+ 'group_names',
+ 'omit',
+ 'playbook_dir',
+ 'play_hosts',
+ 'role_names',
+ 'ungrouped',
+]
+
+__all__ = ['HostVars', 'HostVarsVars']
+
+
+# Note -- this is a Mapping, not a MutableMapping
+class HostVars(Mapping):
+ ''' A special view of vars_cache that adds values from the inventory when needed. '''
+
+ def __init__(self, inventory, variable_manager, loader):
+ self._inventory = inventory
+ self._loader = loader
+ self._variable_manager = variable_manager
+ variable_manager._hostvars = self
+
+ def set_variable_manager(self, variable_manager):
+ self._variable_manager = variable_manager
+ variable_manager._hostvars = self
+
+ def set_inventory(self, inventory):
+ self._inventory = inventory
+
+ def _find_host(self, host_name):
+ # does not use inventory.hosts so it can create localhost on demand
+ return self._inventory.get_host(host_name)
+
+ def raw_get(self, host_name):
+ '''
+ Similar to __getitem__, however the returned data is not run through
+ the templating engine to expand variables in the hostvars.
+ '''
+ host = self._find_host(host_name)
+ if host is None:
+ return AnsibleUndefined(name="hostvars['%s']" % host_name)
+
+ return self._variable_manager.get_vars(host=host, include_hostvars=False)
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+
+ # Methods __getstate__ and __setstate__ of VariableManager do not
+ # preserve _loader and _hostvars attributes to improve pickle
+ # performance and memory utilization. Since HostVars holds values
+ # of those attributes already, assign them if needed.
+ if self._variable_manager._loader is None:
+ self._variable_manager._loader = self._loader
+
+ if self._variable_manager._hostvars is None:
+ self._variable_manager._hostvars = self
+
+ def __getitem__(self, host_name):
+ data = self.raw_get(host_name)
+ if isinstance(data, AnsibleUndefined):
+ return data
+ return HostVarsVars(data, loader=self._loader)
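+
+ # Illustrative: hostvars['web1'] returns a HostVarsVars view whose values
+ # are templated on access, while hostvars.raw_get('web1') returns the
+ # same variables untemplated.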
+
+ def set_host_variable(self, host, varname, value):
+ self._variable_manager.set_host_variable(host, varname, value)
+
+ def set_nonpersistent_facts(self, host, facts):
+ self._variable_manager.set_nonpersistent_facts(host, facts)
+
+ def set_host_facts(self, host, facts):
+ self._variable_manager.set_host_facts(host, facts)
+
+ def __contains__(self, host_name):
+ # does not use inventory.hosts so it can create localhost on demand
+ return self._find_host(host_name) is not None
+
+ def __iter__(self):
+ for host in self._inventory.hosts:
+ yield host
+
+ def __len__(self):
+ return len(self._inventory.hosts)
+
+ def __repr__(self):
+ out = {}
+ for host in self._inventory.hosts:
+ out[host] = self.get(host)
+ return repr(out)
+
+ def __deepcopy__(self, memo):
+ # We do not need to deepcopy because HostVars is immutable,
+ # however we have to implement the method so we can deepcopy
+ # variables' dicts that contain HostVars.
+ return self
+
+
+class HostVarsVars(Mapping):
+
+ def __init__(self, variables, loader):
+ self._vars = variables
+ self._loader = loader
+
+ def __getitem__(self, var):
+ templar = Templar(variables=self._vars, loader=self._loader)
+ return templar.template(self._vars[var], fail_on_undefined=False, static_vars=STATIC_VARS)
+
+ def __contains__(self, var):
+ return (var in self._vars)
+
+ def __iter__(self):
+ return iter(self._vars)
+
+ def __len__(self):
+ return len(self._vars)
+
+ def __repr__(self):
+ templar = Templar(variables=self._vars, loader=self._loader)
+ return repr(templar.template(self._vars, fail_on_undefined=False, static_vars=STATIC_VARS))
diff --git a/lib/ansible/vars/manager.py b/lib/ansible/vars/manager.py
new file mode 100644
index 0000000..a09704e
--- /dev/null
+++ b/lib/ansible/vars/manager.py
@@ -0,0 +1,749 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from collections import defaultdict
+from collections.abc import Mapping, MutableMapping, Sequence
+from hashlib import sha1
+
+from jinja2.exceptions import UndefinedError
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound, AnsibleAssertionError, AnsibleTemplateError
+from ansible.inventory.host import Host
+from ansible.inventory.helpers import sort_groups, get_group_vars
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import text_type, string_types
+from ansible.plugins.loader import lookup_loader
+from ansible.vars.fact_cache import FactCache
+from ansible.template import Templar
+from ansible.utils.display import Display
+from ansible.utils.listify import listify_lookup_plugin_terms
+from ansible.utils.vars import combine_vars, load_extra_vars, load_options_vars
+from ansible.utils.unsafe_proxy import wrap_var
+from ansible.vars.clean import namespace_facts, clean_facts
+from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
+
+display = Display()
+
+
+def preprocess_vars(a):
+ '''
+ Ensures that vars contained in the parameter passed in are
+ returned as a list of dictionaries, so that, for instance, vars
+ loaded from a file conform to an expected state.
+ '''
+
+ if a is None:
+ return None
+ elif not isinstance(a, list):
+ data = [a]
+ else:
+ data = a
+
+ for item in data:
+ if not isinstance(item, MutableMapping):
+ raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
+
+ return data
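+
+# Illustrative example: preprocess_vars({'a': 1}) -> [{'a': 1}], while a list
+# input such as [{'a': 1}, {'b': 2}] is returned unchanged.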
+
+
+class VariableManager:
+
+ _ALLOWED = frozenset(['plugins_by_groups', 'groups_plugins_play', 'groups_plugins_inventory', 'groups_inventory',
+ 'all_plugins_play', 'all_plugins_inventory', 'all_inventory'])
+
+ def __init__(self, loader=None, inventory=None, version_info=None):
+ self._nonpersistent_fact_cache = defaultdict(dict)
+ self._vars_cache = defaultdict(dict)
+ self._extra_vars = defaultdict(dict)
+ self._host_vars_files = defaultdict(dict)
+ self._group_vars_files = defaultdict(dict)
+ self._inventory = inventory
+ self._loader = loader
+ self._hostvars = None
+ self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
+
+ self._options_vars = load_options_vars(version_info)
+
+ # If the basedir is specified as the empty string then it results in cwd being used.
+ # This is not a safe location to load vars from.
+ basedir = self._options_vars.get('basedir', False)
+ self.safe_basedir = bool(basedir is False or basedir)
+
+ # load extra vars
+ self._extra_vars = load_extra_vars(loader=self._loader)
+
+ # load fact cache
+ try:
+ self._fact_cache = FactCache()
+ except AnsibleError as e:
+ # bad cache plugin is not fatal error
+ # fallback to a dict as in memory cache
+ display.warning(to_text(e))
+ self._fact_cache = {}
+
+ def __getstate__(self):
+ data = dict(
+ fact_cache=self._fact_cache,
+ np_fact_cache=self._nonpersistent_fact_cache,
+ vars_cache=self._vars_cache,
+ extra_vars=self._extra_vars,
+ host_vars_files=self._host_vars_files,
+ group_vars_files=self._group_vars_files,
+ omit_token=self._omit_token,
+ options_vars=self._options_vars,
+ inventory=self._inventory,
+ safe_basedir=self.safe_basedir,
+ )
+ return data
+
+ def __setstate__(self, data):
+ self._fact_cache = data.get('fact_cache', defaultdict(dict))
+ self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict))
+ self._vars_cache = data.get('vars_cache', defaultdict(dict))
+ self._extra_vars = data.get('extra_vars', dict())
+ self._host_vars_files = data.get('host_vars_files', defaultdict(dict))
+ self._group_vars_files = data.get('group_vars_files', defaultdict(dict))
+ self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest())
+ self._inventory = data.get('inventory', None)
+ self._options_vars = data.get('options_vars', dict())
+ self.safe_basedir = data.get('safe_basedir', False)
+ self._loader = None
+ self._hostvars = None
+
+ @property
+ def extra_vars(self):
+ return self._extra_vars
+
+ def set_inventory(self, inventory):
+ self._inventory = inventory
+
+ def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True,
+ _hosts=None, _hosts_all=None, stage='task'):
+ '''
+ Returns the variables, with optional "context" given via the parameters
+ for the play, host, and task (which could possibly result in different
+ sets of variables being returned due to the additional context).
+
+ The order of precedence is:
+ - play->roles->get_default_vars (if there is a play context)
+ - group_vars_files[host] (if there is a host context)
+ - host_vars_files[host] (if there is a host context)
+ - host->get_vars (if there is a host context)
+ - fact_cache[host] (if there is a host context)
+ - play vars (if there is a play context)
+ - play vars_files (if there's no host context, ignore
+ file names that cannot be templated)
+ - task->get_vars (if there is a task context)
+ - vars_cache[host] (if there is a host context)
+ - extra vars
+
+ ``_hosts`` and ``_hosts_all`` should be considered private args, with only internal trusted callers relying
+ on the functionality they provide. These arguments may be removed at a later date without a deprecation
+ period and without warning.
+ '''
+
+ display.debug("in VariableManager get_vars()")
+
+ all_vars = dict()
+ magic_variables = self._get_magic_variables(
+ play=play,
+ host=host,
+ task=task,
+ include_hostvars=include_hostvars,
+ include_delegate_to=include_delegate_to,
+ _hosts=_hosts,
+ _hosts_all=_hosts_all,
+ )
+
+ _vars_sources = {}
+
+ def _combine_and_track(data, new_data, source):
+ '''
+ Wrapper function to update var sources dict and call combine_vars()
+
+ See notes in the VarsWithSources docstring for caveats and limitations of the source tracking
+ '''
+ if C.DEFAULT_DEBUG:
+ # Populate var sources dict
+ for key in new_data:
+ _vars_sources[key] = source
+ return combine_vars(data, new_data)
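+
+ # Illustrative: _combine_and_track({'x': 1}, {'x': 2}, "play vars")
+ # returns {'x': 2} and, when config debug is enabled, records
+ # "play vars" as the source of 'x' for VarsWithSources below.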
+
+ # default for all cases
+ basedirs = []
+ if self.safe_basedir: # avoid adhoc/console loading cwd
+ basedirs = [self._loader.get_basedir()]
+
+ if play:
+ # first we compile any vars specified in defaults/main.yml
+ # for all roles within the specified play
+ for role in play.get_roles():
+ all_vars = _combine_and_track(all_vars, role.get_default_vars(), "role '%s' defaults" % role.name)
+
+ if task:
+ # set basedirs
+ if C.PLAYBOOK_VARS_ROOT == 'all': # should be default
+ basedirs = task.get_search_path()
+ elif C.PLAYBOOK_VARS_ROOT in ('bottom', 'playbook_dir'): # only option in 2.4.0
+ basedirs = [task.get_search_path()[0]]
+ elif C.PLAYBOOK_VARS_ROOT != 'top':
+ # preserves default basedirs, only option pre 2.3
+ raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)
+
+ # if we have a task in this context, and that task has a role, make
+ # sure it sees its defaults above any other roles, as we previously
+ # (v1) made sure each task had a copy of its roles default vars
+ if task._role is not None and (play or task.action in C._ACTION_INCLUDE_ROLE):
+ all_vars = _combine_and_track(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()),
+ "role '%s' defaults" % task._role.name)
+
+ if host:
+ # the 'all' group and the rest of the groups for a host, used below
+ all_group = self._inventory.groups.get('all')
+ host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])
+
+ def _get_plugin_vars(plugin, path, entities):
+ data = {}
+ try:
+ data = plugin.get_vars(self._loader, path, entities)
+ except AttributeError:
+ try:
+ for entity in entities:
+ if isinstance(entity, Host):
+ data |= plugin.get_host_vars(entity.name)
+ else:
+ data |= plugin.get_group_vars(entity.name)
+ except AttributeError:
+ if hasattr(plugin, 'run'):
+ raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
+ else:
+ raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
+ return data
+
+ # internal functions that actually do the work
+ def _plugins_inventory(entities):
+ ''' merges all entities by inventory source '''
+ return get_vars_from_inventory_sources(self._loader, self._inventory._sources, entities, stage)
+
+ def _plugins_play(entities):
+ ''' merges all entities adjacent to play '''
+ data = {}
+ for path in basedirs:
+ data = _combine_and_track(data, get_vars_from_path(self._loader, path, entities, stage), "path '%s'" % path)
+ return data
+
+ # configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
+ def all_inventory():
+ return all_group.get_vars()
+
+ def all_plugins_inventory():
+ return _plugins_inventory([all_group])
+
+ def all_plugins_play():
+ return _plugins_play([all_group])
+
+ def groups_inventory():
+ ''' gets group vars from inventory '''
+ return get_group_vars(host_groups)
+
+ def groups_plugins_inventory():
+ ''' gets plugin sources from inventory for groups '''
+ return _plugins_inventory(host_groups)
+
+ def groups_plugins_play():
+ ''' gets plugin sources from play for groups '''
+ return _plugins_play(host_groups)
+
+ def plugins_by_groups():
+ '''
+ merges all plugin sources by group.
+ This should be used instead of, NOT in combination with, the other groups_plugins* functions.
+ '''
+ data = {}
+ for group in host_groups:
+ data[group] = _combine_and_track(data.get(group, {}), _plugins_inventory([group]), "inventory group_vars for '%s'" % group)
+ data[group] = _combine_and_track(data[group], _plugins_play([group]), "playbook group_vars for '%s'" % group)
+ return data
+
+ # Merge groups as per precedence config
+ # only allow to call the functions we want exposed
+ for entry in C.VARIABLE_PRECEDENCE:
+ if entry in self._ALLOWED:
+ display.debug('Calling %s to load vars for %s' % (entry, host.name))
+ all_vars = _combine_and_track(all_vars, locals()[entry](), "group vars, precedence entry '%s'" % entry)
+ else:
+ display.warning('Ignoring unknown variable precedence entry: %s' % (entry))
+
+ # host vars, from inventory, inventory adjacent and play adjacent via plugins
+ all_vars = _combine_and_track(all_vars, host.get_vars(), "host vars for '%s'" % host)
+ all_vars = _combine_and_track(all_vars, _plugins_inventory([host]), "inventory host_vars for '%s'" % host)
+ all_vars = _combine_and_track(all_vars, _plugins_play([host]), "playbook host_vars for '%s'" % host)
+
+ # finally, the fact caches for this host, if they exist
+ # TODO: cleaning of facts should eventually become part of taskresults instead of vars
+ try:
+ facts = wrap_var(self._fact_cache.get(host.name, {}))
+ all_vars |= namespace_facts(facts)
+
+ # push facts to main namespace
+ if C.INJECT_FACTS_AS_VARS:
+ all_vars = _combine_and_track(all_vars, wrap_var(clean_facts(facts)), "facts")
+ else:
+ # always 'promote' ansible_local
+ all_vars = _combine_and_track(all_vars, wrap_var({'ansible_local': facts.get('ansible_local', {})}), "facts")
+ except KeyError:
+ pass
+
+ if play:
+ all_vars = _combine_and_track(all_vars, play.get_vars(), "play vars")
+
+ vars_files = play.get_vars_files()
+ try:
+ for vars_file_item in vars_files:
+ # create a set of temporary vars here, which incorporate the extra
+ # and magic vars so we can properly template the vars_files entries
+ # NOTE: this makes them depend on host vars/facts so things like
+ # ansible_facts['os_distribution'] can be used, ala include_vars.
+ # Consider DEPRECATING this in the future, since we have include_vars ...
+ temp_vars = combine_vars(all_vars, self._extra_vars)
+ temp_vars = combine_vars(temp_vars, magic_variables)
+ templar = Templar(loader=self._loader, variables=temp_vars)
+
+ # we assume each item in the list is itself a list, as we
+ # support "conditional includes" for vars_files, which mimics
+ # the with_first_found mechanism.
+ vars_file_list = vars_file_item
+ if not isinstance(vars_file_list, list):
+ vars_file_list = [vars_file_list]
+
+ # now we iterate through the (potential) files, and break out
+ # as soon as we read one from the list. If none are found, we
+ # raise an error, which is silently ignored at this point.
+ try:
+ for vars_file in vars_file_list:
+ vars_file = templar.template(vars_file)
+ if not (isinstance(vars_file, Sequence)):
+ raise AnsibleError(
+ "Invalid vars_files entry found: %r\n"
+ "vars_files entries should be either a string type or "
+ "a list of string types after template expansion" % vars_file
+ )
+ try:
+ play_search_stack = play.get_search_path()
+ found_file = real_file = self._loader.path_dwim_relative_stack(play_search_stack, 'vars', vars_file)
+ data = preprocess_vars(self._loader.load_from_file(found_file, unsafe=True, cache=False))
+ if data is not None:
+ for item in data:
+ all_vars = _combine_and_track(all_vars, item, "play vars_files from '%s'" % vars_file)
+ break
+ except AnsibleFileNotFound:
+ # we continue on loader failures
+ continue
+ except AnsibleParserError:
+ raise
+ else:
+ # if include_delegate_to is set to False or we don't have a host, we ignore the missing
+ # vars file here, because we are working on a delegated host or the file requires host vars; see NOTE above
+ if include_delegate_to and host:
+ raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
+ except (UndefinedError, AnsibleUndefinedVariable):
+ if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
+ raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'"
+ % vars_file_item, obj=vars_file_item)
+ else:
+ # we do not have a full context here, and the missing variable could be because of that
+ # so just show a warning and continue
+ display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
+ continue
+
+ display.vvv("Read vars_file '%s'" % vars_file_item)
+ except TypeError:
+ raise AnsibleParserError("Error while reading vars files - please supply a list of file names. "
+ "Got '%s' of type %s" % (vars_files, type(vars_files)))
+
+ # By default, we now merge in all vars from all roles in the play,
+ # unless the user has disabled this via a config option
+ if not C.DEFAULT_PRIVATE_ROLE_VARS:
+ for role in play.get_roles():
+ all_vars = _combine_and_track(all_vars, role.get_vars(include_params=False), "role '%s' vars" % role.name)
+
+ # next, we merge in the vars from the role, which will specifically
+ # follow the role dependency chain, and then we merge in the tasks
+ # vars (which will look at parent blocks/task includes)
+ if task:
+ if task._role:
+ all_vars = _combine_and_track(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False),
+ "role '%s' vars" % task._role.name)
+ all_vars = _combine_and_track(all_vars, task.get_vars(), "task vars")
+
+ # next, we merge in the vars cache (include vars) and nonpersistent
+ # facts cache (set_fact/register), in that order
+ if host:
+ # include_vars non-persistent cache
+ all_vars = _combine_and_track(all_vars, self._vars_cache.get(host.get_name(), dict()), "include_vars")
+ # fact non-persistent cache
+ all_vars = _combine_and_track(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()), "set_fact")
+
+ # next, we merge in role params and task include params
+ if task:
+ if task._role:
+ all_vars = _combine_and_track(all_vars, task._role.get_role_params(task.get_dep_chain()), "role '%s' params" % task._role.name)
+
+ # special case for include tasks, where the include params
+ # may be specified in the vars field for the task, which should
+ # have higher precedence than the vars/np facts above
+ all_vars = _combine_and_track(all_vars, task.get_include_params(), "include params")
+
+ # extra vars
+ all_vars = _combine_and_track(all_vars, self._extra_vars, "extra vars")
+
+ # magic variables
+ all_vars = _combine_and_track(all_vars, magic_variables, "magic vars")
+
+ # special case for the 'environment' magic variable, as someone
+ # may have set it as a variable and we don't want to stomp on it
+ if task:
+ all_vars['environment'] = task.environment
+
+ # 'vars' magic var
+ if task or play:
+ # has to be copy, otherwise recursive ref
+ all_vars['vars'] = all_vars.copy()
+
+ # if we have a host and task and we're delegating to another host,
+ # figure out the variables for that host now so we don't have to rely on host vars later
+ if task and host and task.delegate_to is not None and include_delegate_to:
+ all_vars['ansible_delegated_vars'], all_vars['_ansible_loop_cache'] = self._get_delegated_vars(play, task, all_vars)
+
+ display.debug("done with get_vars()")
+ if C.DEFAULT_DEBUG:
+ # Use VarsWithSources wrapper class to display var sources
+ return VarsWithSources.new_vars_with_sources(all_vars, _vars_sources)
+ else:
+ return all_vars
+
+ def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to, _hosts=None, _hosts_all=None):
+ '''
+ Returns a dictionary of so-called "magic" variables in Ansible,
+ which are special variables we set internally for use.
+ '''
+
+ variables = {}
+ variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir())
+ variables['ansible_playbook_python'] = sys.executable
+ variables['ansible_config_file'] = C.CONFIG_FILE
+
+ if play:
+ # This is a list of all role names of all dependencies for all roles for this play
+ dependency_role_names = list({d.get_name() for r in play.roles for d in r.get_all_dependencies()})
+ # This is a list of all role names of all roles for this play
+ play_role_names = [r.get_name() for r in play.roles]
+
+ # ansible_role_names includes all role names, dependent or directly referenced by the play
+ variables['ansible_role_names'] = list(set(dependency_role_names + play_role_names))
+ # ansible_play_role_names includes the names of all roles directly referenced by this play
+ # roles that are implicitly referenced via dependencies are not listed.
+ variables['ansible_play_role_names'] = play_role_names
+ # ansible_dependent_role_names includes the names of all roles that are referenced via dependencies
+ # dependencies that are also explicitly named as roles are included in this list
+ variables['ansible_dependent_role_names'] = dependency_role_names
+
+ # DEPRECATED: role_names should be deprecated in favor of ansible_role_names or ansible_play_role_names
+ variables['role_names'] = variables['ansible_play_role_names']
+
+ variables['ansible_play_name'] = play.get_name()
+
+ if task:
+ if task._role:
+ variables['role_name'] = task._role.get_name(include_role_fqcn=False)
+ variables['role_path'] = task._role._role_path
+ variables['role_uuid'] = text_type(task._role._uuid)
+ variables['ansible_collection_name'] = task._role._role_collection
+ variables['ansible_role_name'] = task._role.get_name()
+
+ if self._inventory is not None:
+ variables['groups'] = self._inventory.get_groups_dict()
+ if play:
+ templar = Templar(loader=self._loader)
+ if not play.finalized and templar.is_template(play.hosts):
+ pattern = 'all'
+ else:
+ pattern = play.hosts or 'all'
+ # add the list of hosts in the play, as adjusted for limit/filters
+ if not _hosts_all:
+ _hosts_all = [h.name for h in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
+ if not _hosts:
+ _hosts = [h.name for h in self._inventory.get_hosts()]
+
+ variables['ansible_play_hosts_all'] = _hosts_all[:]
+ variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
+ variables['ansible_play_batch'] = [x for x in _hosts if x not in play._removed_hosts]
+
+ # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
+ # however this would take work in the templating engine, so for now we'll add both
+ variables['play_hosts'] = variables['ansible_play_batch']
+
+ # the 'omit' value allows params to be left out if the variable they are based on is undefined
+ variables['omit'] = self._omit_token
+ # Set options vars
+ for option, option_value in self._options_vars.items():
+ variables[option] = option_value
+
+ if self._hostvars is not None and include_hostvars:
+ variables['hostvars'] = self._hostvars
+
+ return variables
+
+ def _get_delegated_vars(self, play, task, existing_variables):
+ # This method has a lot of code copied from ``TaskExecutor._get_loop_items``
+ # if this is failing, and ``TaskExecutor._get_loop_items`` is not
+ # then more will have to be copied here.
+ # TODO: dedupe code here and with ``TaskExecutor._get_loop_items``
+ # this may be possible once we move pre-processing pre fork
+
+ if not hasattr(task, 'loop'):
+ # This "task" is not a Task, so we need to skip it
+ return {}, None
+
+ # we unfortunately need to template the delegate_to field here,
+ # as we're fetching vars before post_validate has been called on
+ # the task that has been passed in
+ vars_copy = existing_variables.copy()
+
+ # get search path for this task to pass to lookup plugins
+ vars_copy['ansible_search_path'] = task.get_search_path()
+
+ # ensure basedir is always in (dwim already searches here but we need to display it)
+ if self._loader.get_basedir() not in vars_copy['ansible_search_path']:
+ vars_copy['ansible_search_path'].append(self._loader.get_basedir())
+
+ templar = Templar(loader=self._loader, variables=vars_copy)
+
+ items = []
+ has_loop = True
+ if task.loop_with is not None:
+ if task.loop_with in lookup_loader:
+ fail = True
+ if task.loop_with == 'first_found':
+ # first_found loops are special. If the item is undefined then we want to fall through to the next
+ fail = False
+ try:
+ loop_terms = listify_lookup_plugin_terms(terms=task.loop, templar=templar, fail_on_undefined=fail, convert_bare=False)
+
+ if not fail:
+ loop_terms = [t for t in loop_terms if not templar.is_template(t)]
+
+ mylookup = lookup_loader.get(task.loop_with, loader=self._loader, templar=templar)
+
+ # give lookup task 'context' for subdir (mostly needed for first_found)
+ for subdir in ['template', 'var', 'file']: # TODO: move this to constants?
+ if subdir in task.action:
+ break
+ setattr(mylookup, '_subdir', subdir + 's')
+
+ items = wrap_var(mylookup.run(terms=loop_terms, variables=vars_copy))
+
+ except AnsibleTemplateError:
+ # This task will be skipped later due to this, so we just set up
+ # a dummy array for the later code so it doesn't fail
+ items = [None]
+ else:
+ raise AnsibleError("Failed to find the lookup named '%s' in the available lookup plugins" % task.loop_with)
+ elif task.loop is not None:
+ try:
+ items = templar.template(task.loop)
+ except AnsibleTemplateError:
+ # This task will be skipped later due to this, so we just set up
+ # a dummy array for the later code so it doesn't fail
+ items = [None]
+ else:
+ has_loop = False
+ items = [None]
+
+ # since the host can change per loop item, we keep a dict keyed by resolved host name
+ delegated_host_vars = dict()
+ item_var = getattr(task.loop_control, 'loop_var', 'item')
+ cache_items = False
+ for item in items:
+ # update the variables with the item value for templating, in case we need it
+ if item is not None:
+ vars_copy[item_var] = item
+
+ templar.available_variables = vars_copy
+ delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
+ if delegated_host_name != task.delegate_to:
+ cache_items = True
+ if delegated_host_name is None:
+ raise AnsibleError(message="Undefined delegate_to host for task:", obj=task._ds)
+ if not isinstance(delegated_host_name, string_types):
+ raise AnsibleError(message="the field 'delegate_to' has an invalid type (%s), and could not be"
+ " converted to a string type." % type(delegated_host_name), obj=task._ds)
+
+ if delegated_host_name in delegated_host_vars:
+ # no need to repeat ourselves, as the delegate_to value
+ # does not appear to be tied to the loop item variable
+ continue
+
+ # now try to find the delegated-to host in inventory, or failing that,
+ # create a new host on the fly so we can fetch variables for it
+ delegated_host = None
+ if self._inventory is not None:
+ delegated_host = self._inventory.get_host(delegated_host_name)
+ # try looking it up based on the address field, and finally
+ # fall back to creating a host on the fly to use for the var lookup
+ if delegated_host is None:
+ for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
+ # check if the address matches, or if both the delegated_to host
+ # and the current host are in the list of localhost aliases
+ if h.address == delegated_host_name:
+ delegated_host = h
+ break
+ else:
+ delegated_host = Host(name=delegated_host_name)
+ else:
+ delegated_host = Host(name=delegated_host_name)
+
+ # now we go fetch the vars for the delegated-to host and save them in our
+ # master dictionary of variables to be used later in the TaskExecutor/PlayContext
+ delegated_host_vars[delegated_host_name] = self.get_vars(
+ play=play,
+ host=delegated_host,
+ task=task,
+ include_delegate_to=False,
+ include_hostvars=True,
+ )
+ delegated_host_vars[delegated_host_name]['inventory_hostname'] = vars_copy.get('inventory_hostname')
+
+ _ansible_loop_cache = None
+ if has_loop and cache_items:
+ # delegate_to templating produced a change, so we will cache the templated items
+ # in a special private hostvar
+ # this ensures that delegate_to+loop doesn't produce different results than TaskExecutor
+ # which may reprocess the loop
+ _ansible_loop_cache = items
+
+ return delegated_host_vars, _ansible_loop_cache
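+
+ # Illustrative: for a task with delegate_to: "{{ item }}" and a loop of
+ # ['hostA', 'hostB'], this returns vars for both hosts keyed by name, plus
+ # the templated items in _ansible_loop_cache so the TaskExecutor does not
+ # re-template the loop differently.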
+
+ def clear_facts(self, hostname):
+ '''
+ Clears the facts for a host
+ '''
+ self._fact_cache.pop(hostname, None)
+
+ def set_host_facts(self, host, facts):
+ '''
+ Sets or updates the given facts for a host in the fact cache.
+ '''
+
+ if not isinstance(facts, Mapping):
+ raise AnsibleAssertionError("the type of 'facts' to set for host_facts should be a Mapping but is a %s" % type(facts))
+
+ try:
+ host_cache = self._fact_cache[host]
+ except KeyError:
+ # We get to set this as new
+ host_cache = facts
+ else:
+ if not isinstance(host_cache, MutableMapping):
+ raise TypeError('The object retrieved for {0} must be a MutableMapping but was'
+ ' a {1}'.format(host, type(host_cache)))
+ # Update the existing facts
+ host_cache |= facts
+
+ # Save the facts back to the backing store
+ self._fact_cache[host] = host_cache
+
+ def set_nonpersistent_facts(self, host, facts):
+ '''
+ Sets or updates the given facts for a host in the fact cache.
+ '''
+
+ if not isinstance(facts, Mapping):
+ raise AnsibleAssertionError("the type of 'facts' to set for nonpersistent_facts should be a Mapping but is a %s" % type(facts))
+
+ try:
+ self._nonpersistent_fact_cache[host] |= facts
+ except KeyError:
+ self._nonpersistent_fact_cache[host] = facts
+
+ def set_host_variable(self, host, varname, value):
+ '''
+ Sets a value in the vars_cache for a host.
+ '''
+ if host not in self._vars_cache:
+ self._vars_cache[host] = dict()
+ if varname in self._vars_cache[host] and isinstance(self._vars_cache[host][varname], MutableMapping) and isinstance(value, MutableMapping):
+ self._vars_cache[host] = combine_vars(self._vars_cache[host], {varname: value})
+ else:
+ self._vars_cache[host][varname] = value
+
+
+class VarsWithSources(MutableMapping):
+ '''
+ Dict-like class for vars that also provides source information for each var
+
+ This class can only store the source for top-level vars. It does no tracking
+ on its own; it just shows a debug message with the source information it was
+ provided when a particular var is accessed.
+ '''
+ def __init__(self, *args, **kwargs):
+ ''' Dict-compatible constructor '''
+ self.data = dict(*args, **kwargs)
+ self.sources = {}
+
+ @classmethod
+ def new_vars_with_sources(cls, data, sources):
+ ''' Alternate constructor method to instantiate class with sources '''
+ v = cls(data)
+ v.sources = sources
+ return v
+
+ def get_source(self, key):
+ return self.sources.get(key, None)
+
+ def __getitem__(self, key):
+ val = self.data[key]
+ # See notes in the VarsWithSources docstring for caveats and limitations of the source tracking
+ display.debug("variable '%s' from source: %s" % (key, self.sources.get(key, "unknown")))
+ return val
+
+ def __setitem__(self, key, value):
+ self.data[key] = value
+
+ def __delitem__(self, key):
+ del self.data[key]
+
+ def __iter__(self):
+ return iter(self.data)
+
+ def __len__(self):
+ return len(self.data)
+
+ # Prevent duplicate debug messages by defining our own __contains__ pointing at the underlying dict
+ def __contains__(self, key):
+ return self.data.__contains__(key)
+
+ def copy(self):
+ return VarsWithSources.new_vars_with_sources(self.data.copy(), self.sources.copy())
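+
+# Usage sketch (illustrative):
+# v = VarsWithSources.new_vars_with_sources({'x': 1}, {'x': 'extra vars'})
+# v.get_source('x') # -> 'extra vars'
+# v['x'] # returns 1 and emits a debug message naming the source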
diff --git a/lib/ansible/vars/plugins.py b/lib/ansible/vars/plugins.py
new file mode 100644
index 0000000..303052b
--- /dev/null
+++ b/lib/ansible/vars/plugins.py
@@ -0,0 +1,114 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.inventory.host import Host
+from ansible.module_utils._text import to_bytes
+from ansible.plugins.loader import vars_loader
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+
+display = Display()
+
+
+def get_plugin_vars(loader, plugin, path, entities):
+
+ data = {}
+ try:
+ data = plugin.get_vars(loader, path, entities)
+ except AttributeError:
+ try:
+ for entity in entities:
+ if isinstance(entity, Host):
+ data |= plugin.get_host_vars(entity.name)
+ else:
+ data |= plugin.get_group_vars(entity.name)
+ except AttributeError:
+ if hasattr(plugin, 'run'):
+ raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
+ else:
+ raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
+ return data
+
+
+def get_vars_from_path(loader, path, entities, stage):
+
+ data = {}
+
+ vars_plugin_list = list(vars_loader.all())
+ for plugin_name in C.VARIABLE_PLUGINS_ENABLED:
+ if AnsibleCollectionRef.is_valid_fqcr(plugin_name):
+ vars_plugin = vars_loader.get(plugin_name)
+ if vars_plugin is None:
+ # Error if there's no play directory or the name is wrong?
+ continue
+ if vars_plugin not in vars_plugin_list:
+ vars_plugin_list.append(vars_plugin)
+
+ for plugin in vars_plugin_list:
+ # legacy plugins always run by default, but they can set REQUIRES_ENABLED=True to opt out.
+
+ builtin_or_legacy = plugin.ansible_name.startswith('ansible.builtin.') or '.' not in plugin.ansible_name
+
+ # builtin plugins are supposed to have REQUIRES_ENABLED=True; the following is for legacy plugins...
+ needs_enabled = not builtin_or_legacy
+ if hasattr(plugin, 'REQUIRES_ENABLED'):
+ needs_enabled = plugin.REQUIRES_ENABLED
+ elif hasattr(plugin, 'REQUIRES_WHITELIST'):
+ display.deprecated("The VarsModule class variable 'REQUIRES_WHITELIST' is deprecated. "
+ "Use 'REQUIRES_ENABLED' instead.", version='2.18')
+ needs_enabled = plugin.REQUIRES_WHITELIST
+
+ # A collection plugin was enabled to get to this point because vars_loader.all() does not include collection plugins.
+ # Warn if a collection plugin has REQUIRES_ENABLED because it has no effect.
+ if not builtin_or_legacy and (hasattr(plugin, 'REQUIRES_ENABLED') or hasattr(plugin, 'REQUIRES_WHITELIST')):
+ display.warning(
+ "Vars plugins in collections must be enabled to be loaded, REQUIRES_ENABLED is not supported. "
+ "This should be removed from the plugin %s." % plugin.ansible_name
+ )
+ elif builtin_or_legacy and needs_enabled and not plugin.matches_name(C.VARIABLE_PLUGINS_ENABLED):
+ continue
+
+ has_stage = hasattr(plugin, 'get_option') and plugin.has_option('stage')
+
+ # if a plugin-specific setting has not been provided, use the global setting
+ # older/non-shipped plugins that don't support the plugin-specific setting should also use the global setting
+ use_global = (has_stage and plugin.get_option('stage') is None) or not has_stage
+
+ if use_global:
+ if C.RUN_VARS_PLUGINS == 'demand' and stage == 'inventory':
+ continue
+ elif C.RUN_VARS_PLUGINS == 'start' and stage == 'task':
+ continue
+ elif has_stage and plugin.get_option('stage') not in ('all', stage):
+ continue
+
+ data = combine_vars(data, get_plugin_vars(loader, plugin, path, entities))
+
+ return data
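+
+# Illustrative: with RUN_VARS_PLUGINS='demand' (the default), plugins relying on
+# the global setting are skipped during the 'inventory' stage and run on demand
+# at the 'task' stage; 'start' inverts that behaviour.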
+
+
+def get_vars_from_inventory_sources(loader, sources, entities, stage):
+
+ data = {}
+ for path in sources:
+
+ if path is None:
+ continue
+ if ',' in path and not os.path.exists(path): # skip host lists
+ continue
+ elif not os.path.isdir(to_bytes(path)):
+ # always pass the directory of the inventory source file
+ path = os.path.dirname(path)
+
+ data = combine_vars(data, get_vars_from_path(loader, path, entities, stage))
+
+ return data
diff --git a/lib/ansible/vars/reserved.py b/lib/ansible/vars/reserved.py
new file mode 100644
index 0000000..2d1b4d5
--- /dev/null
+++ b/lib/ansible/vars/reserved.py
@@ -0,0 +1,84 @@
+# (c) 2017 Ansible By Red Hat
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.playbook import Play
+from ansible.playbook.block import Block
+from ansible.playbook.role import Role
+from ansible.playbook.task import Task
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def get_reserved_names(include_private=True):
+ ''' this function returns the set of reserved names associated with play objects'''
+
+ public = set()
+ private = set()
+ result = set()
+
+ # FIXME: find a way to 'not hardcode', possibly need role deps/includes
+ class_list = [Play, Role, Block, Task]
+
+ for aclass in class_list:
+ # loop over the field attributes of each playbook object class
+ for name, attr in aclass.fattributes.items():
+ if attr.private:
+ private.add(name)
+ else:
+ public.add(name)
+
+ # local_action is implicit with action
+ if 'action' in public:
+ public.add('local_action')
+
+ # loop implies with_
+ # FIXME: remove after with_ is not only deprecated but removed
+ if 'loop' in private or 'loop' in public:
+ public.add('with_')
+
+ if include_private:
+ result = public.union(private)
+ else:
+ result = public
+
+ return result
+
+
+def warn_if_reserved(myvars, additional=None):
+ ''' this function warns if any variable passed conflicts with internally reserved names '''
+
+ if additional is None:
+ reserved = _RESERVED_NAMES
+ else:
+ reserved = _RESERVED_NAMES.union(additional)
+
+ varnames = set(myvars)
+ varnames.discard('vars') # we add this one internally, so safe to ignore
+ for varname in varnames.intersection(reserved):
+ display.warning('Found variable using reserved name: %s' % varname)
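+
+# Illustrative: warn_if_reserved({'hosts': 'all', 'myvar': 1}) warns about
+# 'hosts' (a reserved Play attribute) but says nothing about 'myvar'.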
+
+
+def is_reserved_name(name):
+ return name in _RESERVED_NAMES
+
+
+_RESERVED_NAMES = frozenset(get_reserved_names())